/* Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1

#define BD_MAX_SEND_SIZE 8191
#define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
                         int size, dma_addr_t dma, int frag_end,
                         int buf_num, enum hns_desc_type type, int mtu)
{
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct sk_buff *skb;
        int skb_tmp_len;
        __be16 protocol;
        u8 bn_pid = 0;
        u8 rrcfv = 0;
        u8 ip_offset = 0;
        u8 tvsvsn = 0;
        u16 mss = 0;
        u8 l4_len = 0;
        u16 paylen = 0;

        desc_cb->priv = priv;
        desc_cb->length = size;
        desc_cb->dma = dma;
        desc_cb->type = type;

        desc->addr = cpu_to_le64(dma);
        desc->tx.send_size = cpu_to_le16((u16)size);

        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
        hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

        if (type == DESC_TYPE_SKB) {
                skb = (struct sk_buff *)priv;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        skb_reset_mac_len(skb);
                        protocol = skb->protocol;
                        ip_offset = ETH_HLEN;

                        /* if it is a SW VLAN, check the next protocol */
                        if (protocol == htons(ETH_P_8021Q)) {
                                ip_offset += VLAN_HLEN;
                                protocol = vlan_get_protocol(skb);
                                skb->protocol = protocol;
                        }

                        if (skb->protocol == htons(ETH_P_IP)) {
                                iphdr = ip_hdr(skb);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

                                /* check for tcp/udp header */
                                if (iphdr->protocol == IPPROTO_TCP) {
                                        hnae_set_bit(tvsvsn,
                                                     HNSV2_TXD_TSE_B, 1);
                                        skb_tmp_len = SKB_TMP_LEN(skb);
                                        l4_len = tcp_hdrlen(skb);
                                        mss = mtu - skb_tmp_len - ETH_FCS_LEN;
                                        paylen = skb->len - skb_tmp_len;
                                }
                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                                hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
                                ipv6hdr = ipv6_hdr(skb);
                                hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

                                /* check for tcp/udp header */
                                if (ipv6hdr->nexthdr == IPPROTO_TCP) {
                                        hnae_set_bit(tvsvsn,
                                                     HNSV2_TXD_TSE_B, 1);
                                        skb_tmp_len = SKB_TMP_LEN(skb);
                                        l4_len = tcp_hdrlen(skb);
                                        mss = mtu - skb_tmp_len - ETH_FCS_LEN;
                                        paylen = skb->len - skb_tmp_len;
                                }
                        }
                        desc->tx.ip_offset = ip_offset;
                        desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
                        desc->tx.mss = cpu_to_le16(mss);
                        desc->tx.l4_len = l4_len;
                        desc->tx.paylen = cpu_to_le16(paylen);
                }
        }

        hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

        desc->tx.bn_pid = bn_pid;
        desc->tx.ra_ri_cs_fe_vld = rrcfv;

        ring_ptr_move_fw(ring, next_to_use);
}
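
/* A worked example of the TSO fields programmed above, assuming an
 * untagged IPv4/TCP skb with no options and ndev->mtu == 1500:
 *   skb_tmp_len = 54, l4_len = 20,
 *   mss = 1500 - 54 - ETH_FCS_LEN(4) = 1442,
 *   paylen = skb->len - 54.
 * The hardware is then expected to cut the payload into mss-sized
 * segments and replicate the 54-byte header for each segment.
 */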
static void fill_desc(struct hnae_ring *ring, void *priv,
                      int size, dma_addr_t dma, int frag_end,
                      int buf_num, enum hns_desc_type type, int mtu)
{
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
        struct sk_buff *skb;
        __be16 protocol;
        u32 ip_offset;
        u32 asid_bufnum_pid = 0;
        u32 flag_ipoffset = 0;

        desc_cb->priv = priv;
        desc_cb->length = size;
        desc_cb->dma = dma;
        desc_cb->type = type;

        desc->addr = cpu_to_le64(dma);
        desc->tx.send_size = cpu_to_le16((u16)size);

        /* config bd buffer end */
        flag_ipoffset |= 1 << HNS_TXD_VLD_B;

        asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

        if (type == DESC_TYPE_SKB) {
                skb = (struct sk_buff *)priv;

                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        protocol = skb->protocol;
                        ip_offset = ETH_HLEN;

                        /* if it is a SW VLAN, check the next protocol */
                        if (protocol == htons(ETH_P_8021Q)) {
                                ip_offset += VLAN_HLEN;
                                protocol = vlan_get_protocol(skb);
                                skb->protocol = protocol;
                        }

                        if (skb->protocol == htons(ETH_P_IP)) {
                                flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
                                /* check for tcp/udp header */
                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

                        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                                /* ipv6 has no l3 cs, check for L4 header */
                                flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
                        }

                        flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
                }
        }

        flag_ipoffset |= frag_end << HNS_TXD_FE_B;

        desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
        desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

        ring_ptr_move_fw(ring, next_to_use);
}
static void unfill_desc(struct hnae_ring *ring)
{
        ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
        struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
        struct sk_buff *skb = *out_skb;
        struct sk_buff *new_skb = NULL;
        int buf_num;

        /* no. of segments (plus a header) */
        buf_num = skb_shinfo(skb)->nr_frags + 1;

        if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
                if (ring_space(ring) < 1)
                        return -EBUSY;

                new_skb = skb_copy(skb, GFP_ATOMIC);
                if (!new_skb)
                        return -ENOMEM;

                dev_kfree_skb_any(skb);
                *out_skb = new_skb;
                buf_num = 1;
        } else if (buf_num > ring_space(ring)) {
                return -EBUSY;
        }

        *bnum = buf_num;
        return 0;
}
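
/* Note on the fallback above: when an skb needs more descriptors than one
 * packet may use, skb_copy() linearizes it into a single buffer, so the
 * copy always fits in one descriptor (buf_num = 1). The packet is rejected
 * only if even a single descriptor is unavailable.
 */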
int hns_nic_net_xmit_hw(struct net_device *ndev,
                        struct sk_buff *skb,
                        struct hns_nic_ring_data *ring_data)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct hnae_ring *ring = ring_data->ring;
        struct netdev_queue *dev_queue;
        struct skb_frag_struct *frag;
        int buf_num;
        int seg_num;
        dma_addr_t dma;
        int size, next_to_use;
        int i;

        switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
        case -EBUSY:
                ring->stats.tx_busy++;
                goto out_net_tx_busy;
        case -ENOMEM:
                ring->stats.sw_err_cnt++;
                netdev_err(ndev, "no memory to xmit!\n");
                goto out_err_tx_ok;
        default:
                break;
        }

        /* no. of segments (plus a header) */
        seg_num = skb_shinfo(skb)->nr_frags + 1;
        next_to_use = ring->next_to_use;

        /* fill the first part */
        size = skb_headlen(skb);
        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                netdev_err(ndev, "TX head DMA map failed\n");
                ring->stats.sw_err_cnt++;
                goto out_err_tx_ok;
        }
        priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
                            buf_num, DESC_TYPE_SKB, ndev->mtu);

        /* fill the fragments */
        for (i = 1; i < seg_num; i++) {
                frag = &skb_shinfo(skb)->frags[i - 1];
                size = skb_frag_size(frag);
                dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma)) {
                        netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
                        ring->stats.sw_err_cnt++;
                        goto out_map_frag_fail;
                }
                priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
                                    seg_num - 1 == i ? 1 : 0, buf_num,
                                    DESC_TYPE_PAGE, ndev->mtu);
        }

        /* complete translating all packets */
        dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
        netdev_tx_sent_queue(dev_queue, skb->len);

        wmb(); /* commit all data before submit */
        assert(skb->queue_mapping < priv->ae_handle->q_num);
        hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
        ring->stats.tx_pkts++;
        ring->stats.tx_bytes += skb->len;

        return NETDEV_TX_OK;

out_map_frag_fail:

        while (ring->next_to_use != next_to_use) {
                unfill_desc(ring);
                if (ring->next_to_use != next_to_use)
                        dma_unmap_page(dev,
                                       ring->desc_cb[ring->next_to_use].dma,
                                       ring->desc_cb[ring->next_to_use].length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(dev,
                                         ring->desc_cb[next_to_use].dma,
                                         ring->desc_cb[next_to_use].length,
                                         DMA_TO_DEVICE);
        }

out_err_tx_ok:

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;

out_net_tx_busy:

        netif_stop_subqueue(ndev, skb->queue_mapping);

        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it.
         */
        smp_mb();
        return NETDEV_TX_BUSY;
}
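
/* Unwind note: out_map_frag_fail walks next_to_use back to its value on
 * entry. Every descriptor except the first holds a fragment page
 * (dma_unmap_page), while the first holds the linear head mapped with
 * dma_map_single, which is why two unmap variants are needed above.
 */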
/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: rx descriptor flags describing the recognized L3/L4 protocols
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
                                        unsigned int max_size)
{
        unsigned char *network;
        u8 hlen;

        /* this should never happen, but better safe than sorry */
        if (max_size < ETH_HLEN)
                return max_size;

        /* initialize network frame pointer */
        network = data;

        /* set first protocol and move network header forward */
        network += ETH_HLEN;

        /* handle any vlan tag if present */
        if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
                == HNS_RX_FLAG_VLAN_PRESENT) {
                if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
                        return max_size;

                network += VLAN_HLEN;
        }

        /* handle L3 protocols */
        if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
                == HNS_RX_FLAG_L3ID_IPV4) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct iphdr)))
                        return max_size;

                /* access ihl as a u8 to avoid unaligned access on ia64 */
                hlen = (network[0] & 0x0F) << 2;

                /* verify hlen meets minimum size requirements */
                if (hlen < sizeof(struct iphdr))
                        return network - data;

                /* record next protocol if header is present */
        } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
                == HNS_RX_FLAG_L3ID_IPV6) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct ipv6hdr)))
                        return max_size;

                /* record next protocol */
                hlen = sizeof(struct ipv6hdr);
        } else {
                return network - data;
        }

        /* relocate pointer to start of L4 header */
        network += hlen;

        /* finally sort out TCP/UDP */
        if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
                == HNS_RX_FLAG_L4ID_TCP) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct tcphdr)))
                        return max_size;

                /* access doff as a u8 to avoid unaligned access on ia64 */
                hlen = (network[12] & 0xF0) >> 2;

                /* verify hlen meets minimum size requirements */
                if (hlen < sizeof(struct tcphdr))
                        return network - data;

                network += hlen;
        } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
                == HNS_RX_FLAG_L4ID_UDP) {
                if ((typeof(max_size))(network - data) >
                    (max_size - sizeof(struct udphdr)))
                        return max_size;

                network += sizeof(struct udphdr);
        }

        /* If everything has gone correctly network should be the
         * data section of the packet and will be the end of the header.
         * If not then it probably represents the end of the last recognized
         * header.
         */
        if ((typeof(max_size))(network - data) < max_size)
                return network - data;
        else
                return max_size;
}
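
/* Example: for a VLAN-tagged IPv4/TCP frame with no options, the walk
 * above returns 14 (MAC) + 4 (VLAN) + 20 (IP) + 20 (TCP) = 58 bytes,
 * provided the descriptor flags identify the frame as IPv4 and TCP.
 */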
static void
hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset)
{
        /* avoid re-using remote pages, flag defaults to unreuse */
        if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) {
                /* move offset up to the next cache line */
                desc_cb->page_offset += tsize;

                if (desc_cb->page_offset <= last_offset) {
                        desc_cb->reuse_flag = 1;
                        /* bump ref count on page before it is given */
                        get_page(desc_cb->priv);
                }
        }
}
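
/* Buffer recycling: each receive page is consumed in hnae_buf_size()
 * slices. While the next slice still fits (page_offset <= last_offset)
 * and the page is node-local, the same page is handed back to the ring
 * with an extra reference instead of allocating a fresh one.
 */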
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
        *out_bnum = hnae_get_field(bnum_flag,
                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
        *out_bnum = hnae_get_field(bnum_flag,
                                   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
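
/* The two helpers differ because v2 hardware apparently reports the
 * buffer count minus one in the BUFNUM field (hence the "+ 1"), while v1
 * reports it directly; hns_nic_set_priv_ops() picks the matching variant
 * for the enet version.
 */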
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
                               struct sk_buff **out_skb, int *out_bnum)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct sk_buff *skb;
        struct hnae_desc *desc;
        struct hnae_desc_cb *desc_cb;
        unsigned char *va;
        int bnum, length, size, i, truesize, last_offset;
        int pull_len;
        u32 bnum_flag;

        last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
        desc = &ring->desc[ring->next_to_clean];
        desc_cb = &ring->desc_cb[ring->next_to_clean];

        prefetch(desc);

        va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

        /* prefetch first cache line of first page */
        prefetch(va);
#if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
#endif

        skb = *out_skb = napi_alloc_skb(&ring_data->napi,
                                        HNS_RX_HEAD_SIZE);
        if (unlikely(!skb)) {
                netdev_err(ndev, "alloc rx skb fail\n");
                ring->stats.sw_err_cnt++;
                return -ENOMEM;
        }

        length = le16_to_cpu(desc->rx.pkt_len);
        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
        priv->ops.get_rxd_bnum(bnum_flag, &bnum);
        *out_bnum = bnum;

        /* we will be copying header into skb->data in
         * pskb_may_pull so it is in our interest to prefetch
         * it now to avoid a possible cache miss
         */
        prefetchw(skb->data);

        if (length <= HNS_RX_HEAD_SIZE) {
                memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

                /* we can reuse buffer as-is, just make sure it is local */
                if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
                        desc_cb->reuse_flag = 1;
                else /* this page cannot be reused so discard it */
                        put_page(desc_cb->priv);

                ring_ptr_move_fw(ring, next_to_clean);

                if (unlikely(bnum != 1)) { /* check err */
                        *out_bnum = 1;
                        goto out_bnum_err;
                }
        } else {
                ring->stats.seg_pkt_cnt++;

                pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));

                size = le16_to_cpu(desc->rx.size);
                truesize = ALIGN(size, L1_CACHE_BYTES);
                skb_add_rx_frag(skb, 0, desc_cb->priv,
                                desc_cb->page_offset + pull_len,
                                size - pull_len, truesize - pull_len);

                hns_nic_reuse_page(desc_cb, truesize, last_offset);
                ring_ptr_move_fw(ring, next_to_clean);

                if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
                        *out_bnum = 1;
                        goto out_bnum_err;
                }
                for (i = 1; i < bnum; i++) {
                        desc = &ring->desc[ring->next_to_clean];
                        desc_cb = &ring->desc_cb[ring->next_to_clean];
                        size = le16_to_cpu(desc->rx.size);
                        truesize = ALIGN(size, L1_CACHE_BYTES);
                        skb_add_rx_frag(skb, i, desc_cb->priv,
                                        desc_cb->page_offset,
                                        size, truesize);

                        hns_nic_reuse_page(desc_cb, truesize, last_offset);
                        ring_ptr_move_fw(ring, next_to_clean);
                }
        }

        /* check exception processing, free skb and jump past the descs */
        if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
                *out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
                netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
                           bnum, ring->max_desc_num_per_pkt,
                           length, (int)MAX_SKB_FRAGS,
                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
                ring->stats.err_bd_num++;
                dev_kfree_skb_any(skb);
                return -EDOM;
        }

        bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

        if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
                netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
                           ((u64 *)desc)[0], ((u64 *)desc)[1]);
                ring->stats.non_vld_descs++;
                dev_kfree_skb_any(skb);
                return -EINVAL;
        }

        if (unlikely((!desc->rx.pkt_len) ||
                     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
                ring->stats.err_pkt_len++;
                dev_kfree_skb_any(skb);
                return -EFAULT;
        }

        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
                ring->stats.l2_err++;
                dev_kfree_skb_any(skb);
                return -EFAULT;
        }

        ring->stats.rx_pkts++;
        ring->stats.rx_bytes += skb->len;

        if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
                     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
                ring->stats.l3l4_csum_err++;
                return 0;
        }

        skb->ip_summed = CHECKSUM_UNNECESSARY;

        return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
        int i, ret;
        struct hnae_desc_cb res_cbs;
        struct hnae_desc_cb *desc_cb;
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;

        for (i = 0; i < cleand_count; i++) {
                desc_cb = &ring->desc_cb[ring->next_to_use];
                if (desc_cb->reuse_flag) {
                        ring->stats.reuse_pg_cnt++;
                        hnae_reuse_buffer(ring, ring->next_to_use);
                } else {
                        ret = hnae_reserve_buffer_map(ring, &res_cbs);
                        if (ret) {
                                ring->stats.sw_err_cnt++;
                                netdev_err(ndev, "hnae reserve buffer map failed.\n");
                                break;
                        }
                        hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
                }

                ring_ptr_move_fw(ring, next_to_use);
        }

        wmb(); /* make sure all data is written before submit */
        writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
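
/* The writel_relaxed() above publishes to RCB_REG_HEAD how many
 * descriptors were just refilled, presumably so the hardware knows it may
 * reuse them; the wmb() orders the descriptor writes before that doorbell.
 */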
/* pass the received packet up the ip stack and record the receive time */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
                              struct sk_buff *skb)
{
        struct net_device *ndev = ring_data->napi.dev;

        skb->protocol = eth_type_trans(skb, ndev);
        (void)napi_gro_receive(&ring_data->napi, skb);
        ndev->last_rx = jiffies;
}
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
                               int budget, void *v)
{
        struct hnae_ring *ring = ring_data->ring;
        struct sk_buff *skb;
        int num, bnum, ex_num;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
        int recv_pkts, recv_bds, clean_count, err;

        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
        rmb(); /* make sure num taken effect before the other data is touched */

        recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
        while (recv_pkts < budget && recv_bds < num) {
                /* reuse or realloc buffers */
                if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
                        hns_nic_alloc_rx_buffers(ring_data, clean_count);
                        clean_count = 0;
                }

                /* poll one pkt */
                err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
                if (unlikely(!skb)) /* this fault cannot be repaired */
                        break;

                recv_bds += bnum;
                clean_count += bnum;
                if (unlikely(err)) {  /* do jump the err */
                        recv_pkts++;
                        continue;
                }

                /* do update ip stack process */
                ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
                                                        ring_data, skb);
                recv_pkts++;
        }

        /* re-check for buffer descriptors that arrived meanwhile */
        if (recv_pkts < budget) {
                ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
                rmb(); /* complete reading the rx ring bd number */
                if (ex_num > clean_count) {
                        num += ex_num - clean_count;
                        goto recv;
                }
        }

        /* make sure all data is written before submit */
        if (clean_count > 0)
                hns_nic_alloc_rx_buffers(ring_data, clean_count);

        return recv_pkts;
}
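
/* Re-reading RCB_REG_FBDNUM before returning closes the window where
 * packets arrive after the main loop drained the ring: if new buffer
 * descriptors showed up and the budget is not exhausted, the loop is
 * re-entered instead of waiting for the next interrupt.
 */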
static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        int num = 0;

        /* re-check the ring; works around a hardware bug */
        num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

        if (num > 0) {
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);

                napi_schedule(&ring_data->napi);
        }
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
                                            int *bytes, int *pkts)
{
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

        (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
        (*bytes) += desc_cb->length;
        /* desc_cb will be cleaned after hnae_free_buffer_detach */
        hnae_free_buffer_detach(ring, ring->next_to_clean);

        ring_ptr_move_fw(ring, next_to_clean);
}
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
        int u = ring->next_to_use;
        int c = ring->next_to_clean;

        if (unlikely(h > ring->desc_num))
                return 0;

        assert(u > 0 && u < ring->desc_num);
        assert(c > 0 && c < ring->desc_num);
        assert(u != c && h != c); /* must be checked before calling this func */

        return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
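
/* Wrap-around example: with desc_num = 1024, next_to_clean = 1000 and
 * next_to_use = 10, valid hardware heads are h > 1000 or h <= 10; in the
 * non-wrapped case (u > c) the head must lie in (c, u].
 */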
/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
                               int budget, void *v)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct netdev_queue *dev_queue;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        int head;
        int bytes, pkts;

        NETIF_TX_LOCK(ndev);

        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
        rmb(); /* make sure head is ready before touching any data */

        if (is_ring_empty(ring) || head == ring->next_to_clean) {
                NETIF_TX_UNLOCK(ndev);
                return 0; /* no data to poll */
        }

        if (!is_valid_clean_head(ring, head)) {
                netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
                           ring->next_to_use, ring->next_to_clean);
                ring->stats.io_err_cnt++;
                NETIF_TX_UNLOCK(ndev);
                return -EIO;
        }

        bytes = 0;
        pkts = 0;
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

        NETIF_TX_UNLOCK(ndev);

        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_completed_queue(dev_queue, pkts, bytes);

        if (unlikely(priv->link && !netif_carrier_ok(ndev)))
                netif_carrier_on(ndev);

        if (unlikely(pkts && netif_carrier_ok(ndev) &&
                     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (netif_tx_queue_stopped(dev_queue) &&
                    !test_bit(NIC_STATE_DOWN, &priv->state)) {
                        netif_tx_wake_queue(dev_queue);
                        ring->stats.restart_queue++;
                }
        }
        return 0;
}
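
/* The smp_mb() above pairs with the queue-stop path in
 * hns_nic_net_xmit_hw(): a sender that stops the queue must observe the
 * updated next_to_clean, and this reclaimer must observe the stopped
 * state, so a wakeup cannot be lost between the two checks.
 */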
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        int head = ring->next_to_clean;

        /* re-check the ring; works around a hardware bug */
        head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

        if (head != ring->next_to_clean) {
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 1);

                napi_schedule(&ring_data->napi);
        }
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
        struct hnae_ring *ring = ring_data->ring;
        struct net_device *ndev = ring_data->napi.dev;
        struct netdev_queue *dev_queue;
        int head;
        int bytes, pkts;

        NETIF_TX_LOCK(ndev);

        head = ring->next_to_use; /* ntu: software-set ring position */
        bytes = 0;
        pkts = 0;
        while (head != ring->next_to_clean)
                hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

        NETIF_TX_UNLOCK(ndev);

        dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
        netdev_tx_reset_queue(dev_queue);
}
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
        struct hns_nic_ring_data *ring_data =
                container_of(napi, struct hns_nic_ring_data, napi);
        int clean_complete = ring_data->poll_one(
                ring_data, budget, ring_data->ex_process);

        if (clean_complete >= 0 && clean_complete < budget) {
                napi_complete(napi);
                ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                        ring_data->ring, 0);

                ring_data->fini_process(ring_data);
        }

        return clean_complete;
}
static irqreturn_t hns_irq_handle(int irq, void *dev)
{
        struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

        ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
                ring_data->ring, 1);
        napi_schedule(&ring_data->napi);

        return IRQ_HANDLED;
}
/**
 * hns_nic_adjust_link - adjust network mode by the phy state or new param
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
}
/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct phy_device *phy_dev = NULL;

        if (!h->phy_node)
                return 0;

        if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
                phy_dev = of_phy_connect(ndev, h->phy_node,
                                         hns_nic_adjust_link, 0, h->phy_if);
        else
                phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);

        if (unlikely(!phy_dev) || IS_ERR(phy_dev))
                return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);

        phy_dev->supported &= h->if_support;
        phy_dev->advertising = phy_dev->supported;

        if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
                phy_dev->autoneg = false;

        priv->phy = phy_dev;

        return 0;
}
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        napi_enable(&priv->ring_data[idx].napi);

        enable_irq(priv->ring_data[idx].ring->irq);
        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

        return 0;
}
static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        struct sockaddr *mac_addr = p;
        int ret;

        if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
        if (ret) {
                netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
                return ret;
        }

        memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

        return 0;
}
void hns_nic_update_stats(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->update_stats(h, &netdev->stats);
}
/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct device_node *node = priv->dev->of_node;
        const void *mac_addr_temp;

        mac_addr_temp = of_get_mac_address(node);
        if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
                memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
        } else {
                eth_hw_addr_random(ndev);
                dev_warn(priv->dev, "No valid mac, use random mac %pM",
                         ndev->dev_addr);
        }
}
static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
        disable_irq(priv->ring_data[idx].ring->irq);

        napi_disable(&priv->ring_data[idx].napi);
}
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        int i;
        int cpu;
        cpumask_t mask;

        /* different irq balance for 16-core and 32-core systems */
        if (h->q_num == num_possible_cpus()) {
                for (i = 0; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }
        } else {
                for (i = 0; i < h->q_num; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index * 2;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }

                for (i = h->q_num; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2 + 1)) {
                                cpumask_clear(&mask);
                                cpu = rd->queue_index * 2 + 1;
                                cpumask_set_cpu(cpu, &mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
                                                            &mask);
                        }
                }
        }
}
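
/* Resulting layout when q_num != num_possible_cpus() (e.g. 16 queues on
 * 32 cores): tx ring i is pinned to cpu 2*i and rx ring i to cpu 2*i + 1,
 * spreading each queue pair over a distinct pair of cores; with
 * q_num == num_possible_cpus(), both rings of queue i share cpu i.
 */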
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        int i;
        int ret;

        for (i = 0; i < h->q_num * 2; i++) {
                rd = &priv->ring_data[i];

                if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
                        break;

                snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
                         "%s-%s%d", priv->netdev->name,
                         (i < h->q_num ? "tx" : "rx"), rd->queue_index);

                rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

                ret = request_irq(rd->ring->irq,
                                  hns_irq_handle, 0, rd->ring->ring_name, rd);
                if (ret) {
                        netdev_err(priv->netdev, "request irq(%d) fail\n",
                                   rd->ring->irq);
                        return ret;
                }
                disable_irq(rd->ring->irq);
                rd->ring->irq_init_flag = RCB_IRQ_INITED;
        }

        /* set cpu affinity */
        hns_set_irq_affinity(priv);

        return 0;
}
static int hns_nic_net_up(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int i, j, k;
        int ret;

        ret = hns_nic_init_irq(priv);
        if (ret != 0) {
                netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
                return ret;
        }

        for (i = 0; i < h->q_num * 2; i++) {
                ret = hns_nic_ring_open(ndev, i);
                if (ret)
                        goto out_has_some_queues;
        }

        for (k = 0; k < h->q_num; k++)
                h->dev->ops->toggle_queue_status(h->qs[k], 1);

        ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
        if (ret)
                goto out_set_mac_addr_err;

        ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
        if (ret)
                goto out_start_err;

        if (priv->phy)
                phy_start(priv->phy);

        clear_bit(NIC_STATE_DOWN, &priv->state);
        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

        return 0;

out_start_err:
        netif_stop_queue(ndev);
out_set_mac_addr_err:
        for (k = 0; k < h->q_num; k++)
                h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
        for (j = i - 1; j >= 0; j--)
                hns_nic_ring_close(ndev, j);

        set_bit(NIC_STATE_DOWN, &priv->state);

        return ret;
}
static void hns_nic_net_down(struct net_device *ndev)
{
        int i;
        struct hnae_ae_ops *ops;
        struct hns_nic_priv *priv = netdev_priv(ndev);

        if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
                return;

        (void)del_timer_sync(&priv->service_timer);
        netif_tx_stop_all_queues(ndev);
        netif_carrier_off(ndev);
        netif_tx_disable(ndev);
        priv->link = 0;

        if (priv->phy)
                phy_stop(priv->phy);

        ops = priv->ae_handle->dev->ops;

        if (ops->stop)
                ops->stop(priv->ae_handle);

        netif_tx_stop_all_queues(ndev);

        for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
                hns_nic_ring_close(ndev, i);
                hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

                /* clean tx buffers */
                hns_nic_tx_clr_all_bufs(priv->ring_data + i);
        }
}
void hns_nic_net_reset(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *handle = priv->ae_handle;

        while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
                usleep_range(1000, 2000);

        (void)hnae_reinit_handle(handle);

        clear_bit(NIC_STATE_RESETTING, &priv->state);
}
void hns_nic_net_reinit(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        priv->netdev->trans_start = jiffies;
        while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
                usleep_range(1000, 2000);

        hns_nic_net_down(netdev);
        hns_nic_net_reset(netdev);
        (void)hns_nic_net_up(netdev);
        clear_bit(NIC_STATE_REINITING, &priv->state);
}
static int hns_nic_net_open(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int ret;

        if (test_bit(NIC_STATE_TESTING, &priv->state))
                return -EBUSY;

        priv->link = 0;
        netif_carrier_off(ndev);

        ret = netif_set_real_num_tx_queues(ndev, h->q_num);
        if (ret < 0) {
                netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
                           ret);
                return ret;
        }

        ret = netif_set_real_num_rx_queues(ndev, h->q_num);
        if (ret < 0) {
                netdev_err(ndev,
                           "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
                return ret;
        }

        ret = hns_nic_net_up(ndev);
        if (ret) {
                netdev_err(ndev,
                           "hns net up fail, ret=%d!\n", ret);
                return ret;
        }

        return 0;
}
static int hns_nic_net_stop(struct net_device *ndev)
{
        hns_nic_net_down(ndev);

        return 0;
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

static void hns_nic_net_timeout(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);

        hns_tx_timeout_reset(priv);
}
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
                            int cmd)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct phy_device *phy_dev = priv->phy;

        if (!netif_running(netdev))
                return -EINVAL;

        if (!phy_dev)
                return -ENOTSUPP;

        return phy_mii_ioctl(phy_dev, ifr, cmd);
}
/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        unsigned long flags;
        int i;

        local_irq_save(flags);
        for (i = 0; i < priv->ae_handle->q_num * 2; i++)
                napi_schedule(&priv->ring_data[i].napi);
        local_irq_restore(flags);
}
#endif
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        int ret;

        assert(skb->queue_mapping < ndev->ae_handle->q_num);
        ret = hns_nic_net_xmit_hw(ndev, skb,
                                  &tx_ring_data(priv, skb->queue_mapping));
        if (ret == NETDEV_TX_OK) {
                ndev->trans_start = jiffies;
                ndev->stats.tx_bytes += skb->len;
                ndev->stats.tx_packets++;
        }
        return (netdev_tx_t)ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        int ret;

        /* MTU < 68 is an error and causes problems on some kernels */
        if (new_mtu < 68)
                return -EINVAL;

        if (!h->dev->ops->set_mtu)
                return -ENOTSUPP;

        if (netif_running(ndev)) {
                (void)hns_nic_net_stop(ndev);
                msleep(100);

                ret = h->dev->ops->set_mtu(h, new_mtu);
                if (ret)
                        netdev_err(ndev, "set mtu fail, return value %d\n",
                                   ret);

                if (hns_nic_net_open(ndev))
                        netdev_err(ndev, "hns net open fail\n");
        } else {
                ret = h->dev->ops->set_mtu(h, new_mtu);
        }

        if (!ret)
                ndev->mtu = new_mtu;

        return ret;
}
/**
 * hns_set_multicast_list - set multicast mac addresses
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        struct netdev_hw_addr *ha = NULL;

        if (!h) {
                netdev_err(ndev, "hnae handle is null\n");
                return;
        }

        if (h->dev->ops->set_mc_addr) {
                netdev_for_each_mc_addr(ha, ndev)
                        if (h->dev->ops->set_mc_addr(h, ha->addr))
                                netdev_err(ndev, "set multicast fail\n");
        }
}
void hns_nic_set_rx_mode(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->dev->ops->set_promisc_mode) {
                if (ndev->flags & IFF_PROMISC)
                        h->dev->ops->set_promisc_mode(h, 1);
                else
                        h->dev->ops->set_promisc_mode(h, 0);
        }

        hns_set_multicast_list(ndev);
}
struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
                                              struct rtnl_link_stats64 *stats)
{
        int idx = 0;
        u64 tx_bytes = 0;
        u64 rx_bytes = 0;
        u64 tx_pkts = 0;
        u64 rx_pkts = 0;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        for (idx = 0; idx < h->q_num; idx++) {
                tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
                tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
                rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
                rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
        }

        stats->tx_bytes = tx_bytes;
        stats->tx_packets = tx_pkts;
        stats->rx_bytes = rx_bytes;
        stats->rx_packets = rx_pkts;

        stats->rx_errors = ndev->stats.rx_errors;
        stats->multicast = ndev->stats.multicast;
        stats->rx_length_errors = ndev->stats.rx_length_errors;
        stats->rx_crc_errors = ndev->stats.rx_crc_errors;
        stats->rx_missed_errors = ndev->stats.rx_missed_errors;

        stats->tx_errors = ndev->stats.tx_errors;
        stats->rx_dropped = ndev->stats.rx_dropped;
        stats->tx_dropped = ndev->stats.tx_dropped;
        stats->collisions = ndev->stats.collisions;
        stats->rx_over_errors = ndev->stats.rx_over_errors;
        stats->rx_frame_errors = ndev->stats.rx_frame_errors;
        stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
        stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
        stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
        stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
        stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
        stats->tx_window_errors = ndev->stats.tx_window_errors;
        stats->rx_compressed = ndev->stats.rx_compressed;
        stats->tx_compressed = ndev->stats.tx_compressed;

        return stats;
}
static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_open = hns_nic_net_open,
        .ndo_stop = hns_nic_net_stop,
        .ndo_start_xmit = hns_nic_net_xmit,
        .ndo_tx_timeout = hns_nic_net_timeout,
        .ndo_set_mac_address = hns_nic_net_set_mac_address,
        .ndo_change_mtu = hns_nic_change_mtu,
        .ndo_do_ioctl = hns_nic_do_ioctl,
        .ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = hns_nic_poll_controller,
#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        struct hnae_handle *h = priv->ae_handle;
        int state = 1;

        if (priv->phy) {
                if (!genphy_update_link(priv->phy))
                        state = priv->phy->link;
                else
                        state = 0;
        }
        state = state && h->dev->ops->get_status(h);

        if (state != priv->link) {
                if (state) {
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                        netdev_info(netdev, "link up\n");
                } else {
                        netif_carrier_off(netdev);
                        netdev_info(netdev, "link down\n");
                }
                priv->link = state;
        }
}
/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hnae_ae_ops *ops = h->dev->ops;
        u32 *data, reg_num, i;

        if (ops->get_regs_len && ops->get_regs) {
                reg_num = ops->get_regs_len(priv->ae_handle);
                reg_num = (reg_num + 3ul) & ~3ul;
                data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
                if (data) {
                        ops->get_regs(priv->ae_handle, data);
                        for (i = 0; i < reg_num; i += 4)
                                pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                        i, data[i], data[i + 1],
                                        data[i + 2], data[i + 3]);
                }
                kfree(data);
        }

        for (i = 0; i < h->q_num; i++) {
                pr_info("tx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->tx_ring.next_to_clean);
                pr_info("tx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->tx_ring.next_to_use);
                pr_info("rx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->rx_ring.next_to_clean);
                pr_info("rx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->rx_ring.next_to_use);
        }
}
/* for the resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
        enum hnae_port_type type = priv->ae_handle->port_type;

        if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
                return;
        clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

        /* If we're already down, removing or resetting, just bail */
        if (test_bit(NIC_STATE_DOWN, &priv->state) ||
            test_bit(NIC_STATE_REMOVING, &priv->state) ||
            test_bit(NIC_STATE_RESETTING, &priv->state))
                return;

        hns_nic_dump(priv);
        netdev_info(priv->netdev, "try to reset %s port!\n",
                    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

        rtnl_lock();
        /* put off any impending NetWatchDogTimeout */
        priv->netdev->trans_start = jiffies;

        if (type == HNAE_PORT_DEBUG) {
                hns_nic_net_reinit(priv->netdev);
        } else {
                netif_carrier_off(priv->netdev);
                netif_tx_disable(priv->netdev);
        }
        rtnl_unlock();
}
/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
        WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

        smp_mb__before_atomic();
        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
static void hns_nic_service_task(struct work_struct *work)
{
        struct hns_nic_priv *priv
                = container_of(work, struct hns_nic_priv, service_task);
        struct hnae_handle *h = priv->ae_handle;

        hns_nic_update_link_status(priv->netdev);
        h->dev->ops->update_led_status(h);
        hns_nic_update_stats(priv->netdev);

        hns_nic_reset_subtask(priv);
        hns_nic_service_event_complete(priv);
}
static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
        if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
            !test_bit(NIC_STATE_REMOVING, &priv->state) &&
            !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
                (void)schedule_work(&priv->service_task);
}
static void hns_nic_service_timer(unsigned long data)
{
        struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

        hns_nic_task_schedule(priv);
}
/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
        /* Do the reset outside of interrupt context */
        if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
                set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
                netdev_warn(priv->netdev,
                            "initiating reset due to tx timeout(%llu,0x%lx)\n",
                            priv->tx_timeout_count, priv->state);
                priv->tx_timeout_count++;
                hns_nic_task_schedule(priv);
        }
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        int i;

        if (h->q_num > NIC_MAX_Q_PER_VF) {
                netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
                return -EINVAL;
        }

        priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
                                  GFP_KERNEL);
        if (!priv->ring_data)
                return -ENOMEM;

        for (i = 0; i < h->q_num; i++) {
                rd = &priv->ring_data[i];
                rd->queue_index = i;
                rd->ring = &h->qs[i]->tx_ring;
                rd->poll_one = hns_nic_tx_poll_one;
                rd->fini_process = hns_nic_tx_fini_pro;

                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
                rd = &priv->ring_data[i];
                rd->queue_index = i - h->q_num;
                rd->ring = &h->qs[i - h->q_num]->rx_ring;
                rd->poll_one = hns_nic_rx_poll_one;
                rd->ex_process = hns_nic_rx_up_pro;
                rd->fini_process = hns_nic_rx_fini_pro;

                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }

        return 0;
}
static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        int i;

        for (i = 0; i < h->q_num * 2; i++) {
                netif_napi_del(&priv->ring_data[i].napi);
                if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
                        (void)irq_set_affinity_hint(
                                priv->ring_data[i].ring->irq,
                                NULL);
                        free_irq(priv->ring_data[i].ring->irq,
                                 &priv->ring_data[i]);
                }

                priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        kfree(priv->ring_data);
}
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        if (AE_IS_VER1(priv->enet_ver)) {
                priv->ops.fill_desc = fill_desc;
                priv->ops.get_rxd_bnum = get_rx_desc_bnum;
                priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
        } else {
                priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
                priv->ops.fill_desc = fill_v2_desc;
                priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
        }
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h;
        int ret;

        h = hnae_get_handle(&priv->netdev->dev,
                            priv->ae_name, priv->port_id, NULL);
        if (IS_ERR_OR_NULL(h)) {
                ret = PTR_ERR(h);
                dev_dbg(priv->dev, "has not handle, register notifier!\n");
                goto out;
        }
        priv->ae_handle = h;

        ret = hns_nic_init_phy(ndev, h);
        if (ret) {
                dev_err(priv->dev, "probe phy device fail!\n");
                goto out_init_phy;
        }

        ret = hns_nic_init_ring_data(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_init_ring_data;
        }

        hns_nic_set_priv_ops(ndev);

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(priv->dev, "probe register netdev fail!\n");
                goto out_reg_ndev_fail;
        }
        return 0;

out_reg_ndev_fail:
        hns_nic_uninit_ring_data(priv);
        priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
        hnae_put_handle(priv->ae_handle);
        priv->ae_handle = NULL;
out:
        return ret;
}
static int hns_nic_notifier_action(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct hns_nic_priv *priv =
                container_of(nb, struct hns_nic_priv, notifier_block);

        assert(action == HNAE_AE_REGISTER);

        if (!hns_nic_try_get_ae(priv->netdev)) {
                hnae_unregister_notifier(&priv->notifier_block);
                priv->notifier_block.notifier_call = NULL;
        }
        return 0;
}
*pdev
)
1716 struct device
*dev
= &pdev
->dev
;
1717 struct net_device
*ndev
;
1718 struct hns_nic_priv
*priv
;
1719 struct device_node
*node
= dev
->of_node
;
1722 ndev
= alloc_etherdev_mq(sizeof(struct hns_nic_priv
), NIC_MAX_Q_PER_VF
);
1726 platform_set_drvdata(pdev
, ndev
);
1728 priv
= netdev_priv(ndev
);
1730 priv
->netdev
= ndev
;
1732 if (of_device_is_compatible(node
, "hisilicon,hns-nic-v1"))
1733 priv
->enet_ver
= AE_VERSION_1
;
1735 priv
->enet_ver
= AE_VERSION_2
;
1737 ret
= of_property_read_string(node
, "ae-name", &priv
->ae_name
);
1739 goto out_read_string_fail
;
1741 ret
= of_property_read_u32(node
, "port-id", &priv
->port_id
);
1743 goto out_read_string_fail
;
1745 hns_init_mac_addr(ndev
);
1747 ndev
->watchdog_timeo
= HNS_NIC_TX_TIMEOUT
;
1748 ndev
->priv_flags
|= IFF_UNICAST_FLT
;
1749 ndev
->netdev_ops
= &hns_nic_netdev_ops
;
1750 hns_ethtool_set_ops(ndev
);
1752 ndev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1753 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1755 ndev
->vlan_features
|=
1756 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
;
1757 ndev
->vlan_features
|= NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
;
1759 switch (priv
->enet_ver
) {
1761 ndev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1762 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1769 SET_NETDEV_DEV(ndev
, dev
);
1771 if (!dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64)))
1772 dev_dbg(dev
, "set mask to 64bit\n");
1774 dev_err(dev
, "set mask to 32bit fail!\n");
1776 /* carrier off reporting is important to ethtool even BEFORE open */
1777 netif_carrier_off(ndev
);
1779 setup_timer(&priv
->service_timer
, hns_nic_service_timer
,
1780 (unsigned long)priv
);
1781 INIT_WORK(&priv
->service_task
, hns_nic_service_task
);
1783 set_bit(NIC_STATE_SERVICE_INITED
, &priv
->state
);
1784 clear_bit(NIC_STATE_SERVICE_SCHED
, &priv
->state
);
1785 set_bit(NIC_STATE_DOWN
, &priv
->state
);
1787 if (hns_nic_try_get_ae(priv
->netdev
)) {
1788 priv
->notifier_block
.notifier_call
= hns_nic_notifier_action
;
1789 ret
= hnae_register_notifier(&priv
->notifier_block
);
1791 dev_err(dev
, "register notifier fail!\n");
1792 goto out_notify_fail
;
1794 dev_dbg(dev
, "has not handle, register notifier!\n");
1800 (void)cancel_work_sync(&priv
->service_task
);
1801 out_read_string_fail
:
static int hns_nic_dev_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hns_nic_priv *priv = netdev_priv(ndev);

        if (ndev->reg_state != NETREG_UNINITIALIZED)
                unregister_netdev(ndev);

        if (priv->ring_data)
                hns_nic_uninit_ring_data(priv);
        priv->ring_data = NULL;

        if (priv->phy)
                phy_disconnect(priv->phy);
        priv->phy = NULL;

        if (!IS_ERR_OR_NULL(priv->ae_handle))
                hnae_put_handle(priv->ae_handle);
        priv->ae_handle = NULL;
        if (priv->notifier_block.notifier_call)
                hnae_unregister_notifier(&priv->notifier_block);
        priv->notifier_block.notifier_call = NULL;

        set_bit(NIC_STATE_REMOVING, &priv->state);
        (void)cancel_work_sync(&priv->service_task);

        free_netdev(ndev);
        return 0;
}
static const struct of_device_id hns_enet_of_match[] = {
        {.compatible = "hisilicon,hns-nic-v1",},
        {.compatible = "hisilicon,hns-nic-v2",},
        {},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
        .driver = {
                .name = "hns-nic",
                .of_match_table = hns_enet_of_match,
        },
        .probe = hns_nic_dev_probe,
        .remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");