/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2

static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];

static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	if (!buf_pool)
		return;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}

static u16 xgene_enet_get_data_len(u64 bufdatalen)
{
	u16 hw_len, mask;

	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
		return 0;
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
	} else {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;
	}
}

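/*
 * Editorial note (illustrative example, not from the original source): a
 * worked decode of the BUFDATALEN encoding above. A hardware value of 0x4800
 * has BIT(14) set and GENMASK(13, 12) clear, so the SIZE_4K branch applies
 * and 0x4800 & GENMASK(11, 0) == 0x800, i.e. a 2048-byte buffer; a value of
 * 0x4000 takes the same branch, masks to zero, and therefore decodes to the
 * full SIZE_4K.
 */
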
static u16 xgene_enet_set_data_len(u32 size)
{
	u16 hw_len;

	hw_len = (size == SIZE_4K) ? BIT(14) : 0;

	return hw_len;
}

static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
				      u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u32 slots, tail;
	u16 hw_len;
	int i;

	if (unlikely(!buf_pool))
		return 0;

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		page = dev_alloc_page();
		if (unlikely(!page))
			return -ENOMEM;

		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
			put_page(page);
			return -ENOMEM;
		}

		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
					   SET_BIT(COHERENT));

		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

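/*
 * Editorial note: the (tail + 1) & slots update above relies on every ring
 * having a power-of-two number of slots, so that "slots" (slot count minus
 * one) acts as a wrap-around mask. For example, with 256 slots the mask is
 * 0xff and a tail of 255 wraps to 0 without a branch. The same idiom is
 * used throughout this file for ring head/tail arithmetic.
 */
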
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct sk_buff *skb;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		buf_pool->rx_skb[tail] = skb;

		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}

static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	dma_addr_t dma_addr;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);

			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,
					 DMA_FROM_DEVICE);
		}
	}
}

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
{
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	dma_addr_t dma_addr;
	struct page *page;
	int i;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		if (page) {
			dma_addr = buf_pool->frag_dma_addr[i];
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			put_page(page);
		}
	}
}

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}

static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 mss_index;
	u8 status;
	int i;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
	}

	if (likely(skb))
		dev_kfree_skb_any(skb);
	else
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");

	return 0;
}

static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int mss_index = -EBUSY;
	int i;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;
			mss_index = i;
		}
	}

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);
			pdata->mss[i] = mss;
			mss_index = i;
		}
	}

	spin_unlock(&pdata->mss_lock);

	return mss_index;
}

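/*
 * Editorial note: this routine implements a small refcounted cache over the
 * NUM_MSS_REG hardware MSS registers. The first pass reuses a register that
 * is already programmed with the requested MSS; the second pass reprograms
 * any register whose refcount has dropped to zero (the matching decrement
 * happens in xgene_enet_tx_completion()). If neither pass succeeds,
 * mss_index stays at -EBUSY and the failure is propagated to the TX path.
 */
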
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
{
	struct net_device *ndev = skb->dev;
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;
	int mss_index;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside within
				 * the first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))
				return -EBUSY;

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_VAL(IS, proto) |
		    SET_BIT(IC) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return 0;
}

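/*
 * Editorial note: the hopinfo word assembled above packs the per-packet
 * offload parameters consumed by the hardware: L4 and L3 header lengths
 * (TCPHDR/IPHDR, both in 32-bit words), the Ethernet header length (ETHHDR,
 * in bytes), checksum enable (EC), the TSO protocol selector (IS), and,
 * when TSO is active, the ET bit plus the MSS register index chosen by
 * xgene_enet_setup_mss().
 */
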
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}

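/*
 * Editorial note: the "idx ^ 1" above swaps adjacent 64-bit words, which
 * appears to match the hardware's expectation that the two 64-bit words
 * within each 128-bit descriptor half sit in the opposite order from host
 * memory: buffers 0, 1, 2, 3 land in slots 1, 0, 3, 2. The same XOR shows
 * up wherever expanded descriptors are read back on the RX side.
 */
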
static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}

static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo = 0;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;
	int ret;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);
	if (ret)
		return ret;

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
	count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;

	return count;
}

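/*
 * Editorial note: a single transmitted skb can therefore occupy up to three
 * levels of descriptors: the primary raw_desc (linear data), an optional
 * expanded descriptor holding up to four fragment pointers (NV bit set),
 * and, for heavily fragmented skbs or fragments larger than 16K, an
 * external exp_bufs list whose address and length (LL_LEN/LL_BYTES fields)
 * are linked from the expanded descriptor (LL bit set). "count" reports how
 * many ring slots were consumed.
 */
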
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];
	int count;

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count == NETDEV_TX_BUSY)
		return NETDEV_TX_BUSY;

	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
	return NETDEV_TX_OK;
}

static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
{
	__le64 *desc = (void *)exp_desc;
	dma_addr_t dma_addr;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	u32 frag_size;
	int i;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))
		return;

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];
		put_page(page);

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}
	buf_pool->head = head;
}

static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct net_device *ndev;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	struct device *dev;
	struct page *page;
	u16 slots, head;
	int i, ret = 0;
	__le64 *desc;
	u8 status;
	bool nv;

	ndev = rx_ring->ndev;
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
	if (!nv)
		datalen -= 4;

	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);

	if (!nv)
		goto skip_jumbo;

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		if (!frag_size)
			break;

		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;
	}

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

skip_jumbo:
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

out:
	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		if (ret)
			return ret;
	}

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}

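/*
 * Editorial note: refills are batched rather than per-packet. nbufpool and
 * npagepool count down as buffers are consumed, and only when a pool runs
 * out does the driver push NUM_BUFPOOL / NUM_NXTBUFPOOL fresh buffers back
 * to the hardware with a single wr_cmd, amortizing the doorbell cost.
 */
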
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}

static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;
	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
		desc_count = 0;
		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
			desc_count++;
		}
		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
			is_completion = true;
		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		desc_count++;
		processed++;
		if (is_completion)
			pdata->txc_level[ring->index] += desc_count;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (__netif_subqueue_stopped(ndev, ring->index))
			netif_start_subqueue(ndev, ring->index);
	}

	return processed;
}

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
	}

	return processed;
}

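/*
 * Editorial note: interrupt handling is paired with NAPI in the usual way:
 * xgene_enet_rx_irq() disables the ring's IRQ and schedules the poller, and
 * the IRQ is re-enabled here only once a poll consumes less than its
 * budget, i.e. the ring has been fully drained.
 */
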
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;
	int i;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);
	}
}

static void xgene_enet_set_irq_name(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
				 ndev->name);
		} else {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
				 ndev->name, i);
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
			 ndev->name, i);
	}
}

static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret = 0, i;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
		if (ret) {
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
			return ret;
		}
	}

	return ret;
}

static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;
	struct device *dev;
	int i;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
	}
}

static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_enable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_enable(napi);
	}
}

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		napi_disable(napi);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		napi_disable(napi);
	}
}

static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
	if (ret)
		return ret;

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
	if (ret)
		return ret;

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;

	if (ndev->phydev) {
		phy_start(ndev->phydev);
	} else {
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);
	}

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

	return ret;
}

static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int i;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	if (ndev->phydev)
		phy_stop(ndev->phydev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);

	return 0;
}

static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			xgene_enet_delete_ring(ring);
			pdata->port_ops->clear(pdata, ring);
			if (pdata->cq_cnt)
				xgene_enet_delete_ring(ring->cp_ring);
			pdata->tx_ring[i] = NULL;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			page_pool = ring->page_pool;
			if (page_pool) {
				xgene_enet_delete_pagepool(page_pool);
				xgene_enet_delete_ring(page_pool);
				pdata->port_ops->clear(pdata, page_pool);
			}

			buf_pool = ring->buf_pool;
			xgene_enet_delete_bufpool(buf_pool);
			xgene_enet_delete_ring(buf_pool);
			pdata->port_ops->clear(pdata, buf_pool);

			xgene_enet_delete_ring(ring);
			pdata->rx_ring[i] = NULL;
		}
	}
}

static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}

static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	void *p;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (!ring)
			continue;

		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);

		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);

		xgene_enet_free_desc_ring(ring);
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!ring)
			continue;

		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);

			xgene_enet_free_desc_ring(ring->buf_pool);
		}

		page_pool = ring->page_pool;
		if (page_pool) {
			p = page_pool->frag_page;
			if (p)
				devm_kfree(dev, p);

			p = page_pool->frag_dma_addr;
			if (p)
				devm_kfree(dev, p);

			xgene_enet_free_desc_ring(page_pool);
		}

		xgene_enet_free_desc_ring(ring);
	}
}

static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
					   ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
		ring->irq_mbox_addr = irq_mbox_addr;
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}

static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}

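/*
 * Editorial note: ring IDs encode the owner in the high bits and the buffer
 * number in the low six bits, i.e. (owner << 6) | (bufnum & 0x3f), so each
 * owner (CPU, ETH0, ETH1) gets a private space of 64 buffer numbers.
 */
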
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 cpu_bufnum;
	int ret;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
}

static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u16 ring_id, slots;
	__le64 *exp_bufs;
	u8 cpu_bufnum;
	int i, ret, size;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!rx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
						       RING_CFGSIZE_16KB,
						       ring_id);
		if (!buf_pool) {
			ret = -ENOMEM;
			goto err;
		}

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!buf_pool->rx_skb) {
			ret = -ENOMEM;
			goto err;
		}

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {
			/* Reconstructed: the body of this branch was lost;
			 * skipping the jumbo page pool for high queue counts
			 * is the editor's best-guess reading.
			 */
			continue;
		}

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
							RING_CFGSIZE_16KB,
							ring_id);
		if (!page_pool) {
			ret = -ENOMEM;
			goto err;
		}

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
						    GFP_KERNEL);
		if (!page_pool->frag_page) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
							sizeof(dma_addr_t),
							GFP_KERNEL);
		if (!page_pool->frag_dma_addr) {
			ret = -ENOMEM;
			goto err;
		}

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;
	}

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!tx_ring) {
			ret = -ENOMEM;
			goto err;
		}

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);
		if (!exp_bufs) {
			ret = -ENOMEM;
			goto err;
		}
		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];
		} else {
			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
							 cpu_bufnum++);
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
							      RING_CFGSIZE_16KB,
							      ring_id);
			if (!cp_ring) {
				ret = -ENOMEM;
				goto err;
			}

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
			cp_ring->index = i;
		}

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
					       GFP_KERNEL);
		if (!cp_ring->cp_skb) {
			ret = -ENOMEM;
			goto err;
		}

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
						      size, GFP_KERNEL);
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);
			ret = -ENOMEM;
			goto err;
		}

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
	}

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}

static void xgene_enet_get_stats64(struct net_device *ndev,
				   struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;
	struct xgene_enet_desc_ring *ring;
	int i;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring) {
			stats->tx_packets += ring->tx_packets;
			stats->tx_bytes += ring->tx_bytes;
		}
	}

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring) {
			stats->rx_packets += ring->rx_packets;
			stats->rx_bytes += ring->rx_bytes;
			stats->rx_errors += ring->rx_length_errors +
				ring->rx_crc_errors +
				ring->rx_frame_errors +
				ring->rx_fifo_errors;
			stats->rx_dropped += ring->rx_dropped;
		}
	}
	memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
}

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}

static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int frame_size;

	if (!netif_running(ndev))
		return 0;

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);

	return 0;
}

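/*
 * Editorial note: the frame size above adds 18 bytes (the 14-byte Ethernet
 * header plus the 4-byte FCS) to MTUs larger than ETH_DATA_LEN, and
 * otherwise programs the 0x600 (1536-byte) default, which comfortably holds
 * any standard-MTU frame.
 */
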
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};

static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
		return;
	}

	pdata->port_id = temp;
}

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;

	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);
}

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	/* delay is unsigned, so only the upper bound needs checking */
	if (delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	/* delay is unsigned, so only the upper bound needs checking */
	if (delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev = pdata->pdev;
	struct device *dev = &pdev->dev;
	int i, ret, max_irqs;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		max_irqs = 1;
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = 2;
	else
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret <= 0) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				max_irqs = i;
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
				break;
			}
			dev_err(dev, "Unable to get ENET IRQ\n");
			ret = ret ? : -ENXIO;
			return ret;
		}
		pdata->irqs[i] = ret;
	}

	return 0;
}

static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
{
	int ret;

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
		return 0;

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))
		return 0;

	ret = xgene_enet_phy_connect(pdata->ndev);
	if (!ret)
		pdata->mdio_driver = true;

	return 0;
}

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))
		return;

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
}

static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret = 0;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		xgene_get_port_id_dt(dev, pdata);
#ifdef CONFIG_ACPI
	else
		xgene_get_port_id_acpi(dev, pdata);
#endif

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_get_irqs(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_check_phy_handle(pdata);
	if (ret)
		return ret;

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Abort if the clock is defined but couldn't be retrieved.
		 * Always abort if the clock is missing on DT system as
		 * the driver can't cope with this case.
		 */
		if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
			return PTR_ERR(pdata->clk);
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return ret;
}

static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;
	u32 count;
	int i, ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);
		if (ret)
			goto err;

		ret = xgene_enet_refill_pagepool(page_pool, count);
		if (ret)
			goto err;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
		if (ret) {
			netdev_err(ndev, "Preclass Tree init error\n");
			goto err;
		}
	} else {
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);
	}

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	return ret;

err:
	xgene_enet_delete_desc_rings(pdata);
	return ret;
}

static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 0;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rxq_cnt = 1;
		pdata->txq_cnt = 1;
		pdata->cq_cnt = 1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;
		}
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			}
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}

static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;
	int i;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}

static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_enet_of_match);

static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	}
#ifdef CONFIG_ACPI
	else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;
	}
#endif
	if (!pdata->enet_id) {
		ret = -ENODEV;
		goto err;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		spin_lock_init(&pdata->mss_lock);
	}
	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
			ret = xgene_enet_mdio_config(pdata);
		else
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

		if (ret)
			goto err1;
	}

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err1;
	}

	return 0;

err1:
	/*
	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	 */
	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
err:
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);
	return ret;
}

static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	pdata->port_ops->shutdown(pdata);
	xgene_enet_delete_desc_rings(pdata);
	free_netdev(ndev);

	return 0;
}

static void xgene_enet_shutdown(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xgene_enet_remove(pdev);
}

static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
};

module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");