/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
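
/* SKB_TMP_LEN() is the skb's total header length: the L2+L3 span
 * (transport_header - mac_header) plus the TCP header.  Subtracting it
 * from skb->len gives the TSO payload length programmed into the BD.
 * BD_MAX_SEND_SIZE (8191) is the most one buffer descriptor can carry,
 * so any larger buffer has to be split across several BDs.
 */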
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u16 paylen = 0;
	u8 l4_len = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management packets */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}
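
/* Note on fill_v2_desc_hw(): @size is recorded in desc_cb->length (used
 * later for DMA unmap and byte statistics), while @send_sz is the per-BD
 * transmit length handed to the hardware.  The two only differ on the TSO
 * path, where fill_tso_desc() splits one mapped buffer over several BDs:
 * the first BD carries the whole @size for bookkeeping and every BD
 * carries just its own slice as @send_sz.
 */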
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
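
/* Example of the BD budgeting above: a TSO skb with an 18000-byte linear
 * area and one 9000-byte fragment needs
 *   ceil(18000 / 8191) + ceil(9000 / 8191) = 3 + 2 = 5
 * buffer descriptors, because no single BD may exceed BD_MAX_SEND_SIZE.
 */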
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}
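
/* With the split above, an 18000-byte buffer becomes three BDs of 8191,
 * 8191 and 1618 bytes (sizeoflast = 18000 % 8191 = 1618).  Only the last
 * BD of the last fragment carries the frag_end flag, and only the very
 * first BD of an skb is typed DESC_TYPE_SKB, so the completion path frees
 * the skb exactly once.
 */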
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
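
/* Note on hns_nic_reuse_page(): in the two-buffers-per-page layout the
 * XOR with truesize flips page_offset between the two halves of the page,
 * so the same page alternates between buffers for as long as the driver
 * is the page's sole owner (page_count == 1).  In the multi-buffer layout
 * the offset simply advances by one aligned buffer until the page is
 * exhausted.
 */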
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
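
/* The v2 hardware apparently encodes the buffer count minus one in the RX
 * descriptor (hence the "+ 1"), while v1 stores the count directly; the
 * two helpers above hide that difference behind priv->ops.get_rxd_bnum.
 */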
static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to
	 * bad checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
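
/* Note on hns_nic_alloc_rx_buffers(): the writel_relaxed() above reports
 * how many descriptors were just recycled or refilled back to the RCB
 * hardware, so it knows that many BDs are available again for incoming
 * frames.
 */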
/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
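
/* Worked example for hns_desc_unused(): the result is
 * (next_to_clean - next_to_use) mod desc_num, i.e. how many RX slots have
 * been consumed but not refilled yet.  With next_to_clean = 100 and
 * next_to_use = 90 that is 10; with next_to_clean = 6, next_to_use = 10
 * and desc_num = 1024 it wraps to 1020.
 */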
#define HNS_LOWEST_LATENCY_RATE		27	/* 27 MB/s */
#define HNS_LOW_LATENCY_RATE		80	/* 80 MB/s */

#define HNS_COAL_BDNUM			3

static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;

	if (coal_enable &&
	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
		return HNS_COAL_BDNUM;
	else
		return 0;
}

static void hns_update_rx_rate(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;
	u32 time_passed_ms;
	u64 total_bytes;

	if (!coal_enable ||
	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
		return;

	/* ring->stats.rx_bytes overflowed */
	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
		ring->coal_last_jiffies = jiffies;
		return;
	}

	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
	do_div(total_bytes, time_passed_ms);
	ring->coal_rx_rate = total_bytes >> 10;

	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
	ring->coal_last_jiffies = jiffies;
}
/**
 * smooth_alg - smoothing algorithm for adjusting coalesce parameter
 * @new_param: new value
 * @old_param: old value
 **/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
	u32 gap = (new_param > old_param) ? new_param - old_param
					  : old_param - new_param;

	if (new_param > old_param)
		return old_param + gap;
	else
		return old_param - gap;
}
/**
 * hns_nic_adpt_coalesce - self-adapt coalesce according to rx rate
 * @ring_data: pointer to hns_nic_ring_data
 **/
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct hnae_handle *handle = ring->q->handle;
	u32 new_coal_param, old_coal_param = ring->coal_param;

	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
	else
		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;

	if (new_coal_param == old_coal_param &&
	    new_coal_param == handle->coal_param)
		return;

	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
	ring->coal_param = new_coal_param;

	/* Because all rings in one port share one coalesce param, when one
	 * ring calculates its own coalesce param, it cannot write to
	 * hardware at once. There are three conditions as follows:
	 * 1. current ring's coalesce param is larger than the hardware.
	 * 2. or the ring which adapted last time can change again.
	 * 3. timeout.
	 */
	if (new_coal_param == handle->coal_param) {
		handle->coal_last_jiffies = jiffies;
		handle->coal_ring_idx = ring_data->queue_index;
	} else if (new_coal_param > handle->coal_param ||
		   handle->coal_ring_idx == ring_data->queue_index ||
		   time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
		handle->dev->ops->set_coalesce_usecs(handle,
						     new_coal_param);
		handle->dev->ops->set_coalesce_frames(handle,
						      1, new_coal_param);
		handle->coal_param = new_coal_param;
		handle->coal_ring_idx = ring_data->queue_index;
		handle->coal_last_jiffies = jiffies;
	}
}
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;
	bool rx_stopped;

	hns_update_rx_rate(ring);

	/* for hardware bug fixed */
	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		rx_stopped = true;
	} else {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		rx_stopped = false;
	}

	return rx_stopped;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	hns_update_rx_rate(ring);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		return true;
	}

	return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
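
/* Example for is_valid_clean_head(): the hardware head is valid only if
 * it lies in the half-open interval (next_to_clean, next_to_use] on the
 * circular ring.  With u = 10 and c = 500 on a 1024-entry ring, h = 600
 * or h = 7 are valid (the interval wraps), while h = 100 is not.
 */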
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return 0; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	/* update tx ring statistics. */
	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}
*ring_data
)
1009 struct hnae_ring
*ring
= ring_data
->ring
;
1012 ring_data
->ring
->q
->handle
->dev
->ops
->toggle_ring_irq(ring
, 0);
1014 head
= readl_relaxed(ring
->io_base
+ RCB_REG_HEAD
);
1016 if (head
!= ring
->next_to_clean
) {
1017 ring_data
->ring
->q
->handle
->dev
->ops
->toggle_ring_irq(
1018 ring_data
->ring
, 1);
1026 static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data
*ring_data
)
1028 struct hnae_ring
*ring
= ring_data
->ring
;
1029 int head
= readl_relaxed(ring
->io_base
+ RCB_REG_HEAD
);
1031 if (head
== ring
->next_to_clean
)
1037 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data
*ring_data
)
1039 struct hnae_ring
*ring
= ring_data
->ring
;
1040 struct net_device
*ndev
= ring_data
->napi
.dev
;
1041 struct netdev_queue
*dev_queue
;
1045 head
= ring
->next_to_use
; /* ntu :soft setted ring position*/
1048 while (head
!= ring
->next_to_clean
)
1049 hns_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1051 dev_queue
= netdev_get_tx_queue(ndev
, ring_data
->queue_index
);
1052 netdev_tx_reset_queue(dev_queue
);
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}
/**
 *hns_nic_adjust_link - adjust network mode by the phy state or new param
 *@ndev: net device
 **/
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	/* If there is no phy, we do not need to adjust the link */
	if (ndev->phydev) {
		/* When phy link down, do nothing */
		if (ndev->phydev->link == 0)
			return;

		if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
						  ndev->phydev->duplex)) {
			/* because the Hi161X chip doesn't support changing
			 * gmac speed and duplex with traffic, delay 200ms to
			 * make sure there is no more data in the chip FIFO.
			 */
			netif_carrier_off(ndev);
			msleep(200);
			h->dev->ops->adjust_link(h, ndev->phydev->speed,
						 ndev->phydev->duplex);
			netif_carrier_on(ndev);
		}
	}

	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}
/**
 *hns_nic_init_phy - init phy
 *@ndev: net device
 *@h: ae handle
 * Return 0 on success, negative on failure
 **/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	return 0;
}
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

static void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16-core and 32-core systems.
	 * The cpu mask is set by ring index according to the ring flag
	 * which indicates whether the ring is tx or rx.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}
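
/* Mapping example: with q_num == num_possible_cpus() (e.g. 16 queues on
 * 16 cores) TX ring i is pinned to CPU i and RX ring i to CPU i - q_num,
 * one ring per core.  Otherwise (e.g. 16 queues on 32 cores) TX ring i
 * goes to CPU 2*i and RX ring i to the odd sibling 2*(i - q_num) + 1,
 * spreading TX over even and RX over odd cores.
 */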
static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
	int i;

	for (i = 0; i < q_num * 2; i++) {
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
			priv->ring_data[i].ring->irq_init_flag =
				RCB_IRQ_NOT_INITED;
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			goto out_free_irq;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;

out_free_irq:
	hns_nic_free_irq(h->q_num, priv);
	return ret;
}
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	if (!test_bit(NIC_STATE_DOWN, &priv->state))
		return 0;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	hns_nic_free_irq(h->q_num, priv);
	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}
void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	enum hnae_port_type type = priv->ae_handle->port_type;

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);

	/* Only do hns_nic_net_reset in debug mode
	 * because of hardware limitation.
	 */
	if (type == HNAE_PORT_DEBUG)
		hns_nic_net_reset(netdev);

	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
		ndev->watchdog_timeo *= 2;
		netdev_info(ndev, "watchdog_timo changed to %d.\n",
			    ndev->watchdog_timeo);
	} else {
		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
		hns_tx_timeout_reset(priv);
	}
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
#define HNS_LB_TX_RING	0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be a tcp/ip package */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}

static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* link adjust duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}
/**
 *hns_nic_clear_all_rx_fetch - clear the chip-fetched descriptors. The
 *function works as follows:
 * 1. if one rx ring has found the page_offset is not equal 0 between head
 *    and tail, it means that the chip fetched the wrong descs for the ring
 *    whose buffer size is 4096.
 * 2. we set the chip serdes loopback and set rss indirection to the ring.
 * 3. construct 64-byte ip broadcast packages, wait for the associated rx
 *    ring to receive all packages, and it will fetch new descriptors.
 * 4. recover to the original state.
 *
 *@ndev: net device
 **/
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found = false;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* foreach every rx ring to clear fetched desc */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb)
					goto out;
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean tx ring 0 send package */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	/* MTU no change */
	if (new_mtu == ndev->mtu)
		return 0;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (if_running)
		(void)hns_nic_net_stop(ndev);

	if (priv->enet_ver != AE_VERSION_1 &&
	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
	    new_mtu > BD_SIZE_2048_MAX_MTU) {
		/* update desc */
		hnae_reinit_all_ring_desc(h);

		/* clear the package which the chip has fetched */
		ret = hns_nic_clear_all_rx_fetch(ndev);

		/* the page offset must be consistent with the desc */
		hnae_reinit_all_ring_page_off(h);

		if (ret) {
			netdev_err(ndev, "clear the fetched desc fail\n");
			goto out;
		}
	}

	ret = h->dev->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(ndev, "set mtu fail, return value %d\n",
			   ret);
		goto out;
	}

	/* finally, set new mtu to netdevice */
	ndev->mtu = new_mtu;

out:
	if (if_running) {
		if (hns_nic_net_open(ndev)) {
			netdev_err(ndev, "hns net open fail\n");
			ret = -EINVAL;
		}
	}

	return ret;
}
static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}

/**
 * hns_set_multicast_list - set multicast mac address
 * @ndev: net device
 *
 * return void
 */
static void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}
static void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}
static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}
/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);
	hns_nic_net_reinit(priv->netdev);

	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
	/* make sure to commit the things */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_reset_subtask(priv);
	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(struct timer_list *t)
{
	struct hns_nic_priv *priv = from_timer(priv, t, service_timer);

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso when init
		 * control tso on/off through TSE bit in bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}
*pdev
)
2259 struct device
*dev
= &pdev
->dev
;
2260 struct net_device
*ndev
;
2261 struct hns_nic_priv
*priv
;
2265 ndev
= alloc_etherdev_mq(sizeof(struct hns_nic_priv
), NIC_MAX_Q_PER_VF
);
2269 platform_set_drvdata(pdev
, ndev
);
2271 priv
= netdev_priv(ndev
);
2273 priv
->netdev
= ndev
;
2275 if (dev_of_node(dev
)) {
2276 struct device_node
*ae_node
;
2278 if (of_device_is_compatible(dev
->of_node
,
2279 "hisilicon,hns-nic-v1"))
2280 priv
->enet_ver
= AE_VERSION_1
;
2282 priv
->enet_ver
= AE_VERSION_2
;
2284 ae_node
= of_parse_phandle(dev
->of_node
, "ae-handle", 0);
2287 dev_err(dev
, "not find ae-handle\n");
2288 goto out_read_prop_fail
;
2290 priv
->fwnode
= &ae_node
->fwnode
;
2291 } else if (is_acpi_node(dev
->fwnode
)) {
2292 struct acpi_reference_args args
;
2294 if (acpi_dev_found(hns_enet_acpi_match
[0].id
))
2295 priv
->enet_ver
= AE_VERSION_1
;
2296 else if (acpi_dev_found(hns_enet_acpi_match
[1].id
))
2297 priv
->enet_ver
= AE_VERSION_2
;
2301 /* try to find port-idx-in-ae first */
2302 ret
= acpi_node_get_property_reference(dev
->fwnode
,
2303 "ae-handle", 0, &args
);
2305 dev_err(dev
, "not find ae-handle\n");
2306 goto out_read_prop_fail
;
2308 priv
->fwnode
= acpi_fwnode_handle(args
.adev
);
2310 dev_err(dev
, "cannot read cfg data from OF or acpi\n");
2314 ret
= device_property_read_u32(dev
, "port-idx-in-ae", &port_id
);
2316 /* only for old code compatible */
2317 ret
= device_property_read_u32(dev
, "port-id", &port_id
);
2319 goto out_read_prop_fail
;
2320 /* for old dts, we need to caculate the port offset */
2321 port_id
= port_id
< HNS_SRV_OFFSET
? port_id
+ HNS_DEBUG_OFFSET
2322 : port_id
- HNS_SRV_OFFSET
;
2324 priv
->port_id
= port_id
;
2326 hns_init_mac_addr(ndev
);
2328 ndev
->watchdog_timeo
= HNS_NIC_TX_TIMEOUT
;
2329 ndev
->priv_flags
|= IFF_UNICAST_FLT
;
2330 ndev
->netdev_ops
= &hns_nic_netdev_ops
;
2331 hns_ethtool_set_ops(ndev
);
2333 ndev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
2334 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
2336 ndev
->vlan_features
|=
2337 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
;
2338 ndev
->vlan_features
|= NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
;
2340 /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
2341 ndev
->min_mtu
= MAC_MIN_MTU
;
2342 switch (priv
->enet_ver
) {
2344 ndev
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_NTUPLE
;
2345 ndev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
2346 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
2347 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
;
2348 ndev
->vlan_features
|= NETIF_F_TSO
| NETIF_F_TSO6
;
2349 ndev
->max_mtu
= MAC_MAX_MTU_V2
-
2350 (ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
2353 ndev
->max_mtu
= MAC_MAX_MTU
-
2354 (ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
2358 SET_NETDEV_DEV(ndev
, dev
);
2360 if (!dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64)))
2361 dev_dbg(dev
, "set mask to 64bit\n");
2363 dev_err(dev
, "set mask to 64bit fail!\n");
2365 /* carrier off reporting is important to ethtool even BEFORE open */
2366 netif_carrier_off(ndev
);
2368 timer_setup(&priv
->service_timer
, hns_nic_service_timer
, 0);
2369 INIT_WORK(&priv
->service_task
, hns_nic_service_task
);
2371 set_bit(NIC_STATE_SERVICE_INITED
, &priv
->state
);
2372 clear_bit(NIC_STATE_SERVICE_SCHED
, &priv
->state
);
2373 set_bit(NIC_STATE_DOWN
, &priv
->state
);
2375 if (hns_nic_try_get_ae(priv
->netdev
)) {
2376 priv
->notifier_block
.notifier_call
= hns_nic_notifier_action
;
2377 ret
= hnae_register_notifier(&priv
->notifier_block
);
2379 dev_err(dev
, "register notifier fail!\n");
2380 goto out_notify_fail
;
2382 dev_dbg(dev
, "has not handle, register notifier!\n");
2388 (void)cancel_work_sync(&priv
->service_task
);
2390 /* safe for ACPI FW */
2391 of_node_put(to_of_node(priv
->fwnode
));
static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));

	free_netdev(ndev);
	return 0;
}
[] = {
2429 {.compatible
= "hisilicon,hns-nic-v1",},
2430 {.compatible
= "hisilicon,hns-nic-v2",},
2434 MODULE_DEVICE_TABLE(of
, hns_enet_of_match
);
2436 static struct platform_driver hns_nic_dev_driver
= {
2439 .of_match_table
= hns_enet_of_match
,
2440 .acpi_match_table
= ACPI_PTR(hns_enet_acpi_match
),
2442 .probe
= hns_nic_dev_probe
,
2443 .remove
= hns_nic_dev_remove
,
2446 module_platform_driver(hns_nic_dev_driver
);
2448 MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
2449 MODULE_AUTHOR("Hisilicon, Inc.");
2450 MODULE_LICENSE("GPL");
2451 MODULE_ALIAS("platform:hns-nic");