/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
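/* SKB_TMP_LEN is the Ethernet + IP + TCP header length of an skb:
 * the distance from the MAC header to the transport header plus the
 * TCP header itself.  The TSO paths below subtract it from skb->len
 * to derive the payload length (paylen) written into the descriptor.
 */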
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}
static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 checksum, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}
static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
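/* Worked example of the BD arithmetic above: with BD_MAX_SEND_SIZE =
 * 8191, a 20000-byte area needs (20000 + 8191 - 1) / 8191 = 3 buffer
 * descriptors.  The same ceiling division is applied to the linear
 * part and to every frag, and again to skb->len after linearizing.
 */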
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}
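/* Continuing the example: size = 20000 gives frag_buf_num = 3 and
 * sizeoflast = 20000 % 8191 = 3618, so the loop emits BDs of 8191,
 * 8191 and 3618 bytes.  Only the first BD of a linear area keeps
 * DESC_TYPE_SKB; the split-off tail BDs are filled as DESC_TYPE_PAGE.
 */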
int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}
/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}
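/* Nibble arithmetic used above: the low nibble of the first IPv4 byte
 * is ihl in 32-bit words, so (network[0] & 0x0F) << 2 converts it to
 * bytes (5 -> 20).  The high nibble of TCP byte 12 is doff, and
 * (network[12] & 0xF0) >> 2 likewise equals doff * 4 (0x50 -> 20).
 */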
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
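/* In two-buffer mode (e.g. 4K pages split into two 2K buffers),
 * page_offset ^= truesize simply flips between the two halves of the
 * page; the buffer can be handed back to hardware once the stack's
 * reference has been accounted for with get_page().
 */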
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
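/* The +1 in the v2 variant reflects a descriptor-format difference:
 * v2 hardware appears to store the buffer count biased by one,
 * whereas v1 reports the count directly.
 */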
static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error encountered
	 * during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is an L3/L4 error due to bad
	 * checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check exception process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
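/* Example: desc_num = 1024, next_to_clean = 10, next_to_use = 1000.
 * Since ntc < ntu the result is 1024 + 10 - 1000 = 34 descriptors
 * that are neither posted to hardware nor pending a clean.
 */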
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* for hardware bug fixed */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0)
		return false;
	else
		return true;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
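/* Wrap-around check: with u = next_to_use and c = next_to_clean, the
 * hardware head h is only valid inside the in-flight region (c, u].
 * E.g. u = 5, c = 900, desc_num = 1024: the region has wrapped, so
 * any h > 900 or h <= 5 is acceptable, which is the second arm above.
 */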
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}
static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 *hns_nic_adjust_link - adjust network mode by the phy stat or new param
 *@ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}
/**
 *hns_nic_init_phy - init phy
 *@ndev: net device
 *@h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}
static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}
static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16-core and 32-core.
	 * The cpu mask is set by ring index according to the ring flag
	 * which indicates whether the ring is tx or rx.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}
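/* Illustration of the mapping above for q_num = 16 queues: on a
 * 16-core system TX ring i and RX ring i both pin to cpu i; on a
 * 32-core system TX ring i goes to the even cpu 2 * i and its RX
 * partner to the odd cpu 2 * i + 1, pairing each queue on two cores.
 */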
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;
}
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}
void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7 * 4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}

/**
 * nic_set_multicast_list - set the multicast mac address list
 * @netdev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}
struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx = 0;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}
static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}
/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_reset_subtask(priv);
	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}
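/* Resulting layout of priv->ring_data: slots [0, q_num) hold the TX
 * rings and slots [q_num, 2 * q_num) the RX rings, so the TX/RX pair
 * for queue i lives at indices i and i + q_num respectively.
 */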
static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7 * 4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso at init time;
		 * control tso on/off through the TSE bit in the bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else
			return -ENXIO;

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		return -ENXIO;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatibility */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	free_netdev(ndev);
	return ret;
}
static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");