drivers/net/ethernet/apm/xgene/xgene_enet_main.c
1 /* Applied Micro X-Gene SoC Ethernet Driver
2 *
3 * Copyright (c) 2014, Applied Micro Circuits Corporation
4 * Authors: Iyappan Subramanian <isubramanian@apm.com>
5 * Ravi Patel <rapatel@apm.com>
6 * Keyur Chudgar <kchudgar@apm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include "xgene_enet_main.h"
23 #include "xgene_enet_hw.h"
24 #include "xgene_enet_sgmac.h"
25 #include "xgene_enet_xgmac.h"
26
27 #define RES_ENET_CSR 0
28 #define RES_RING_CSR 1
29 #define RES_RING_CMD 2
30
31 static const struct of_device_id xgene_enet_of_match[];
32 static const struct acpi_device_id xgene_enet_acpi_match[];
33
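/* Pre-fill every free-pool descriptor with its slot index (read back later
 * as USERINFO to locate the matching rx_skb entry), the destination ring
 * number and a fixed STASH value.
 */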
34 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
35 {
36 struct xgene_enet_raw_desc16 *raw_desc;
37 int i;
38
39 for (i = 0; i < buf_pool->slots; i++) {
40 raw_desc = &buf_pool->raw_desc16[i];
41
42 /* Hardware expects descriptor in little endian format */
43 raw_desc->m0 = cpu_to_le64(i |
44 SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
45 SET_VAL(STASH, 3));
46 }
47 }
48
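/* Allocate nbuf receive skbs, DMA-map their data buffers and post the
 * corresponding descriptors to the hardware buffer pool.
 */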
49 static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
50 u32 nbuf)
51 {
52 struct sk_buff *skb;
53 struct xgene_enet_raw_desc16 *raw_desc;
54 struct xgene_enet_pdata *pdata;
55 struct net_device *ndev;
56 struct device *dev;
57 dma_addr_t dma_addr;
58 u32 tail = buf_pool->tail;
59 u32 slots = buf_pool->slots - 1;
60 u16 bufdatalen, len;
61 int i;
62
63 ndev = buf_pool->ndev;
64 dev = ndev_to_dev(buf_pool->ndev);
65 pdata = netdev_priv(ndev);
66 bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
67 len = XGENE_ENET_MAX_MTU;
68
69 for (i = 0; i < nbuf; i++) {
70 raw_desc = &buf_pool->raw_desc16[tail];
71
72 skb = netdev_alloc_skb_ip_align(ndev, len);
73 if (unlikely(!skb))
74 return -ENOMEM;
75 buf_pool->rx_skb[tail] = skb;
76
77 dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
78 if (dma_mapping_error(dev, dma_addr)) {
79 netdev_err(ndev, "DMA mapping error\n");
80 dev_kfree_skb_any(skb);
81 return -EINVAL;
82 }
83
84 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
85 SET_VAL(BUFDATALEN, bufdatalen) |
86 SET_BIT(COHERENT));
87 tail = (tail + 1) & slots;
88 }
89
90 pdata->ring_ops->wr_cmd(buf_pool, nbuf);
91 buf_pool->tail = tail;
92
93 return 0;
94 }
95
96 static u8 xgene_enet_hdr_len(const void *data)
97 {
98 const struct ethhdr *eth = data;
99
100 return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
101 }
102
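/* Free every skb still owned by the buffer pool and retire the matching
 * descriptors from the hardware ring.
 */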
103 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
104 {
105 struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
106 struct xgene_enet_raw_desc16 *raw_desc;
107 u32 slots = buf_pool->slots - 1;
108 u32 tail = buf_pool->tail;
109 u32 userinfo;
110 int i, len;
111
112 len = pdata->ring_ops->len(buf_pool);
113 for (i = 0; i < len; i++) {
114 tail = (tail - 1) & slots;
115 raw_desc = &buf_pool->raw_desc16[tail];
116
117 /* Hardware stores descriptor in little endian format */
118 userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
119 dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
120 }
121
122 pdata->ring_ops->wr_cmd(buf_pool, -len);
123 buf_pool->tail = tail;
124 }
125
126 static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
127 {
128 struct xgene_enet_desc_ring *rx_ring = data;
129
130 if (napi_schedule_prep(&rx_ring->napi)) {
131 disable_irq_nosync(irq);
132 __napi_schedule(&rx_ring->napi);
133 }
134
135 return IRQ_HANDLED;
136 }
137
138 static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
139 struct xgene_enet_raw_desc *raw_desc)
140 {
141 struct sk_buff *skb;
142 struct device *dev;
143 skb_frag_t *frag;
144 dma_addr_t *frag_dma_addr;
145 u16 skb_index;
146 u8 status;
147 int i, ret = 0;
148
149 skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
150 skb = cp_ring->cp_skb[skb_index];
151 frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
152
153 dev = ndev_to_dev(cp_ring->ndev);
154 dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
155 skb_headlen(skb),
156 DMA_TO_DEVICE);
157
158 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
159 frag = &skb_shinfo(skb)->frags[i];
160 dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
161 DMA_TO_DEVICE);
162 }
163
164 /* Checking for error */
165 status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
166 if (unlikely(status > 2)) {
167 xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
168 status);
169 ret = -EIO;
170 }
171
172 if (likely(skb)) {
173 dev_kfree_skb_any(skb);
174 } else {
175 netdev_err(cp_ring->ndev, "completion skb is NULL\n");
176 ret = -EIO;
177 }
178
179 return ret;
180 }
181
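/* Build the 64-bit work message for a transmit skb: Ethernet/IP/TCP header
 * lengths, checksum-offload enable and, when applicable, the TSO enable bit.
 * Returns 0 if the skb had to be linearized and that failed.
 */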
182 static u64 xgene_enet_work_msg(struct sk_buff *skb)
183 {
184 struct net_device *ndev = skb->dev;
185 struct iphdr *iph;
186 u8 l3hlen = 0, l4hlen = 0;
187 u8 ethhdr, proto = 0, csum_enable = 0;
188 u64 hopinfo = 0;
189 u32 hdr_len, mss = 0;
190 u32 i, len, nr_frags;
191
192 ethhdr = xgene_enet_hdr_len(skb->data);
193
194 if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
195 unlikely(skb->protocol != htons(ETH_P_8021Q)))
196 goto out;
197
198 if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
199 goto out;
200
201 iph = ip_hdr(skb);
202 if (unlikely(ip_is_fragment(iph)))
203 goto out;
204
205 if (likely(iph->protocol == IPPROTO_TCP)) {
206 l4hlen = tcp_hdrlen(skb) >> 2;
207 csum_enable = 1;
208 proto = TSO_IPPROTO_TCP;
209 if (ndev->features & NETIF_F_TSO) {
210 hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
211 mss = skb_shinfo(skb)->gso_size;
212
213 if (skb_is_nonlinear(skb)) {
214 len = skb_headlen(skb);
215 nr_frags = skb_shinfo(skb)->nr_frags;
216
217 for (i = 0; i < 2 && i < nr_frags; i++)
218 len += skb_shinfo(skb)->frags[i].size;
219
220 /* HW requires the header to reside within the first 3 buffers */
221 if (unlikely(hdr_len > len)) {
222 if (skb_linearize(skb))
223 return 0;
224 }
225 }
226
227 if (!mss || ((skb->len - hdr_len) <= mss))
228 goto out;
229
230 hopinfo |= SET_BIT(ET);
231 }
232 } else if (iph->protocol == IPPROTO_UDP) {
233 l4hlen = UDP_HDR_SIZE;
234 csum_enable = 1;
235 }
236 out:
237 l3hlen = ip_hdrlen(skb) >> 2;
238 hopinfo |= SET_VAL(TCPHDR, l4hlen) |
239 SET_VAL(IPHDR, l3hlen) |
240 SET_VAL(ETHHDR, ethhdr) |
241 SET_VAL(EC, csum_enable) |
242 SET_VAL(IS, proto) |
243 SET_BIT(IC) |
244 SET_BIT(TYPE_ETH_WORK_MESSAGE);
245
246 return hopinfo;
247 }
248
249 static u16 xgene_enet_encode_len(u16 len)
250 {
251 return (len == BUFLEN_16K) ? 0 : len;
252 }
253
254 static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
255 {
256 desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
257 SET_VAL(BUFDATALEN, len));
258 }
259
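/* Return the next group of entries in the ring's extended-buffer array,
 * used when a scatter-gather frame needs more buffers than the extension
 * descriptor can hold inline.
 */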
260 static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
261 {
262 __le64 *exp_bufs;
263
264 exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
265 memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
266 ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
267
268 return exp_bufs;
269 }
270
271 static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
272 {
273 return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
274 }
275
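/* Fill one descriptor (plus an extension descriptor for scatter-gather
 * frames) from the skb, DMA-mapping the head and every fragment; fragments
 * that do not fit in the extension descriptor spill into an external buffer
 * list (LL). Returns the number of ring slots consumed or a negative error.
 */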
276 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
277 struct sk_buff *skb)
278 {
279 struct device *dev = ndev_to_dev(tx_ring->ndev);
280 struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
281 struct xgene_enet_raw_desc *raw_desc;
282 __le64 *exp_desc = NULL, *exp_bufs = NULL;
283 dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
284 skb_frag_t *frag;
285 u16 tail = tx_ring->tail;
286 u64 hopinfo;
287 u32 len, hw_len;
288 u8 ll = 0, nv = 0, idx = 0;
289 bool split = false;
290 u32 size, offset, ell_bytes = 0;
291 u32 i, fidx, nr_frags, count = 1;
292
293 raw_desc = &tx_ring->raw_desc[tail];
294 tail = (tail + 1) & (tx_ring->slots - 1);
295 memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
296
297 hopinfo = xgene_enet_work_msg(skb);
298 if (!hopinfo)
299 return -EINVAL;
300 raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
301 hopinfo);
302
303 len = skb_headlen(skb);
304 hw_len = xgene_enet_encode_len(len);
305
306 dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
307 if (dma_mapping_error(dev, dma_addr)) {
308 netdev_err(tx_ring->ndev, "DMA mapping error\n");
309 return -EINVAL;
310 }
311
312 /* Hardware expects descriptor in little endian format */
313 raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
314 SET_VAL(BUFDATALEN, hw_len) |
315 SET_BIT(COHERENT));
316
317 if (!skb_is_nonlinear(skb))
318 goto out;
319
320 /* scatter gather */
321 nv = 1;
322 exp_desc = (void *)&tx_ring->raw_desc[tail];
323 tail = (tail + 1) & (tx_ring->slots - 1);
324 memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
325
326 nr_frags = skb_shinfo(skb)->nr_frags;
327 for (i = nr_frags; i < 4 ; i++)
328 exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
329
330 frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
331
332 for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
333 if (!split) {
334 frag = &skb_shinfo(skb)->frags[fidx];
335 size = skb_frag_size(frag);
336 offset = 0;
337
338 pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
339 DMA_TO_DEVICE);
340 if (dma_mapping_error(dev, pbuf_addr))
341 return -EINVAL;
342
343 frag_dma_addr[fidx] = pbuf_addr;
344 fidx++;
345
346 if (size > BUFLEN_16K)
347 split = true;
348 }
349
350 if (size > BUFLEN_16K) {
351 len = BUFLEN_16K;
352 size -= BUFLEN_16K;
353 } else {
354 len = size;
355 split = false;
356 }
357
358 dma_addr = pbuf_addr + offset;
359 hw_len = xgene_enet_encode_len(len);
360
361 switch (i) {
362 case 0:
363 case 1:
364 case 2:
365 xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
366 break;
367 case 3:
368 if (split || (fidx != nr_frags)) {
369 exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
370 xgene_set_addr_len(exp_bufs, idx, dma_addr,
371 hw_len);
372 idx++;
373 ell_bytes += len;
374 } else {
375 xgene_set_addr_len(exp_desc, i, dma_addr,
376 hw_len);
377 }
378 break;
379 default:
380 xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
381 idx++;
382 ell_bytes += len;
383 break;
384 }
385
386 if (split)
387 offset += BUFLEN_16K;
388 }
389 count++;
390
391 if (idx) {
392 ll = 1;
393 dma_addr = dma_map_single(dev, exp_bufs,
394 sizeof(u64) * MAX_EXP_BUFFS,
395 DMA_TO_DEVICE);
396 if (dma_mapping_error(dev, dma_addr)) {
397 /* the caller frees the skb when we return an error */
398 return -EINVAL;
399 }
400 i = ell_bytes >> LL_BYTES_LSB_LEN;
401 exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
402 SET_VAL(LL_BYTES_MSB, i) |
403 SET_VAL(LL_LEN, idx));
404 raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
405 }
406
407 out:
408 raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
409 SET_VAL(USERINFO, tx_ring->tail));
410 tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
411 pdata->tx_level[tx_ring->cp_ring->index] += count;
412 tx_ring->tail = tail;
413
414 return count;
415 }
416
417 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
418 struct net_device *ndev)
419 {
420 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
421 struct xgene_enet_desc_ring *tx_ring;
422 int index = skb->queue_mapping;
423 u32 tx_level = pdata->tx_level[index];
424 int count;
425
426 tx_ring = pdata->tx_ring[index];
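/* tx_level and txc_level are free-running counters; compensate for
 * wrap-around before computing the in-flight descriptor count.
 */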
427 if (tx_level < pdata->txc_level[index])
428 tx_level += ((typeof(pdata->tx_level[index]))~0U);
429
430 if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
431 netif_stop_subqueue(ndev, index);
432 return NETDEV_TX_BUSY;
433 }
434
435 if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
436 return NETDEV_TX_OK;
437
438 count = xgene_enet_setup_tx_desc(tx_ring, skb);
439 if (count <= 0) {
440 dev_kfree_skb_any(skb);
441 return NETDEV_TX_OK;
442 }
443
444 skb_tx_timestamp(skb);
445
446 pdata->stats.tx_packets++;
447 pdata->stats.tx_bytes += skb->len;
448
449 pdata->ring_ops->wr_cmd(tx_ring, count);
450 return NETDEV_TX_OK;
451 }
452
453 static void xgene_enet_skip_csum(struct sk_buff *skb)
454 {
455 struct iphdr *iph = ip_hdr(skb);
456
457 if (!ip_is_fragment(iph) ||
458 (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
459 skb->ip_summed = CHECKSUM_UNNECESSARY;
460 }
461 }
462
463 static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
464 struct xgene_enet_raw_desc *raw_desc)
465 {
466 struct net_device *ndev;
467 struct xgene_enet_pdata *pdata;
468 struct device *dev;
469 struct xgene_enet_desc_ring *buf_pool;
470 u32 datalen, skb_index;
471 struct sk_buff *skb;
472 u8 status;
473 int ret = 0;
474
475 ndev = rx_ring->ndev;
476 pdata = netdev_priv(ndev);
477 dev = ndev_to_dev(rx_ring->ndev);
478 buf_pool = rx_ring->buf_pool;
479
480 dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
481 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
482 skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
483 skb = buf_pool->rx_skb[skb_index];
484
485 /* checking for error */
486 status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
487 if (unlikely(status > 2)) {
488 dev_kfree_skb_any(skb);
489 xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
490 status);
491 pdata->stats.rx_dropped++;
492 ret = -EIO;
493 goto out;
494 }
495
496 /* strip off CRC as HW isn't doing this */
497 datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
498 datalen = (datalen & DATALEN_MASK) - 4;
499 prefetch(skb->data - NET_IP_ALIGN);
500 skb_put(skb, datalen);
501
502 skb_checksum_none_assert(skb);
503 skb->protocol = eth_type_trans(skb, ndev);
504 if (likely((ndev->features & NETIF_F_IP_CSUM) &&
505 skb->protocol == htons(ETH_P_IP))) {
506 xgene_enet_skip_csum(skb);
507 }
508
509 pdata->stats.rx_packets++;
510 pdata->stats.rx_bytes += datalen;
511 napi_gro_receive(&rx_ring->napi, skb);
512 out:
513 if (--rx_ring->nbufpool == 0) {
514 ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
515 rx_ring->nbufpool = NUM_BUFPOOL;
516 }
517
518 return ret;
519 }
520
521 static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
522 {
523 return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
524 }
525
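/* NAPI poll worker shared by rx and tx-completion rings: consume up to
 * budget descriptors, dispatching each to xgene_enet_rx_frame() or
 * xgene_enet_tx_completion(), then advance the ring head.
 */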
526 static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
527 int budget)
528 {
529 struct net_device *ndev = ring->ndev;
530 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
531 struct xgene_enet_raw_desc *raw_desc, *exp_desc;
532 u16 head = ring->head;
533 u16 slots = ring->slots - 1;
534 int ret, desc_count, count = 0, processed = 0;
535 bool is_completion;
536
537 do {
538 raw_desc = &ring->raw_desc[head];
539 desc_count = 0;
540 is_completion = false;
541 exp_desc = NULL;
542 if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
543 break;
544
545 /* read fpqnum field after dataaddr field */
546 dma_rmb();
547 if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
548 head = (head + 1) & slots;
549 exp_desc = &ring->raw_desc[head];
550
551 if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
552 head = (head - 1) & slots;
553 break;
554 }
555 dma_rmb();
556 count++;
557 desc_count++;
558 }
559 if (is_rx_desc(raw_desc)) {
560 ret = xgene_enet_rx_frame(ring, raw_desc);
561 } else {
562 ret = xgene_enet_tx_completion(ring, raw_desc);
563 is_completion = true;
564 }
565 xgene_enet_mark_desc_slot_empty(raw_desc);
566 if (exp_desc)
567 xgene_enet_mark_desc_slot_empty(exp_desc);
568
569 head = (head + 1) & slots;
570 count++;
571 desc_count++;
572 processed++;
573 if (is_completion)
574 pdata->txc_level[ring->index] += desc_count;
575
576 if (ret)
577 break;
578 } while (--budget);
579
580 if (likely(count)) {
581 pdata->ring_ops->wr_cmd(ring, -count);
582 ring->head = head;
583
584 if (__netif_subqueue_stopped(ndev, ring->index))
585 netif_start_subqueue(ndev, ring->index);
586 }
587
588 return processed;
589 }
590
591 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
592 {
593 struct xgene_enet_desc_ring *ring;
594 int processed;
595
596 ring = container_of(napi, struct xgene_enet_desc_ring, napi);
597 processed = xgene_enet_process_ring(ring, budget);
598
599 if (processed != budget) {
600 napi_complete(napi);
601 enable_irq(ring->irq);
602 }
603
604 return processed;
605 }
606
607 static void xgene_enet_timeout(struct net_device *ndev)
608 {
609 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
610 struct netdev_queue *txq;
611 int i;
612
613 pdata->mac_ops->reset(pdata);
614
615 for (i = 0; i < pdata->txq_cnt; i++) {
616 txq = netdev_get_tx_queue(ndev, i);
617 txq->trans_start = jiffies;
618 netif_tx_start_queue(txq);
619 }
620 }
621
622 static int xgene_enet_register_irq(struct net_device *ndev)
623 {
624 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
625 struct device *dev = ndev_to_dev(ndev);
626 struct xgene_enet_desc_ring *ring;
627 int ret = 0, i;
628
629 for (i = 0; i < pdata->rxq_cnt; i++) {
630 ring = pdata->rx_ring[i];
631 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
632 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
633 0, ring->irq_name, ring);
634 if (ret) {
635 netdev_err(ndev, "Failed to request irq %s\n",
636 ring->irq_name);
637 }
638 }
639
640 for (i = 0; i < pdata->cq_cnt; i++) {
641 ring = pdata->tx_ring[i]->cp_ring;
642 irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
643 ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
644 0, ring->irq_name, ring);
645 if (ret) {
646 netdev_err(ndev, "Failed to request irq %s\n",
647 ring->irq_name);
648 }
649 }
650
651 return ret;
652 }
653
654 static void xgene_enet_free_irq(struct net_device *ndev)
655 {
656 struct xgene_enet_pdata *pdata;
657 struct xgene_enet_desc_ring *ring;
658 struct device *dev;
659 int i;
660
661 pdata = netdev_priv(ndev);
662 dev = ndev_to_dev(ndev);
663
664 for (i = 0; i < pdata->rxq_cnt; i++) {
665 ring = pdata->rx_ring[i];
666 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
667 devm_free_irq(dev, ring->irq, ring);
668 }
669
670 for (i = 0; i < pdata->cq_cnt; i++) {
671 ring = pdata->tx_ring[i]->cp_ring;
672 irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
673 devm_free_irq(dev, ring->irq, ring);
674 }
675 }
676
677 static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
678 {
679 struct napi_struct *napi;
680 int i;
681
682 for (i = 0; i < pdata->rxq_cnt; i++) {
683 napi = &pdata->rx_ring[i]->napi;
684 napi_enable(napi);
685 }
686
687 for (i = 0; i < pdata->cq_cnt; i++) {
688 napi = &pdata->tx_ring[i]->cp_ring->napi;
689 napi_enable(napi);
690 }
691 }
692
693 static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
694 {
695 struct napi_struct *napi;
696 int i;
697
698 for (i = 0; i < pdata->rxq_cnt; i++) {
699 napi = &pdata->rx_ring[i]->napi;
700 napi_disable(napi);
701 }
702
703 for (i = 0; i < pdata->cq_cnt; i++) {
704 napi = &pdata->tx_ring[i]->cp_ring->napi;
705 napi_disable(napi);
706 }
707 }
708
709 static int xgene_enet_open(struct net_device *ndev)
710 {
711 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
712 const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
713 int ret;
714
715 ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);
716 if (ret)
717 return ret;
718
719 ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);
720 if (ret)
721 return ret;
722
723 mac_ops->tx_enable(pdata);
724 mac_ops->rx_enable(pdata);
725
726 xgene_enet_napi_enable(pdata);
727 ret = xgene_enet_register_irq(ndev);
728 if (ret)
729 return ret;
730
731 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
732 phy_start(pdata->phy_dev);
733 else
734 schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
735
736 netif_start_queue(ndev);
737
738 return ret;
739 }
740
741 static int xgene_enet_close(struct net_device *ndev)
742 {
743 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
744 const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
745 int i;
746
747 netif_stop_queue(ndev);
748
749 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
750 phy_stop(pdata->phy_dev);
751 else
752 cancel_delayed_work_sync(&pdata->link_work);
753
754 mac_ops->tx_disable(pdata);
755 mac_ops->rx_disable(pdata);
756
757 xgene_enet_free_irq(ndev);
758 xgene_enet_napi_disable(pdata);
759 for (i = 0; i < pdata->rxq_cnt; i++)
760 xgene_enet_process_ring(pdata->rx_ring[i], -1);
761
762 return 0;
763 }
764
765 static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
766 {
767 struct xgene_enet_pdata *pdata;
768 struct device *dev;
769
770 pdata = netdev_priv(ring->ndev);
771 dev = ndev_to_dev(ring->ndev);
772
773 pdata->ring_ops->clear(ring);
774 dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
775 }
776
777 static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
778 {
779 struct xgene_enet_desc_ring *buf_pool;
780 struct xgene_enet_desc_ring *ring;
781 int i;
782
783 for (i = 0; i < pdata->txq_cnt; i++) {
784 ring = pdata->tx_ring[i];
785 if (ring) {
786 xgene_enet_delete_ring(ring);
787 pdata->tx_ring[i] = NULL;
788 }
789 }
790
791 for (i = 0; i < pdata->rxq_cnt; i++) {
792 ring = pdata->rx_ring[i];
793 if (ring) {
794 buf_pool = ring->buf_pool;
795 xgene_enet_delete_bufpool(buf_pool);
796 xgene_enet_delete_ring(buf_pool);
797 xgene_enet_delete_ring(ring);
798 pdata->rx_ring[i] = NULL;
799 }
800 }
801 }
802
803 static int xgene_enet_get_ring_size(struct device *dev,
804 enum xgene_enet_ring_cfgsize cfgsize)
805 {
806 int size = -EINVAL;
807
808 switch (cfgsize) {
809 case RING_CFGSIZE_512B:
810 size = 0x200;
811 break;
812 case RING_CFGSIZE_2KB:
813 size = 0x800;
814 break;
815 case RING_CFGSIZE_16KB:
816 size = 0x4000;
817 break;
818 case RING_CFGSIZE_64KB:
819 size = 0x10000;
820 break;
821 case RING_CFGSIZE_512KB:
822 size = 0x80000;
823 break;
824 default:
825 dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
826 break;
827 }
828
829 return size;
830 }
831
832 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
833 {
834 struct xgene_enet_pdata *pdata;
835 struct device *dev;
836
837 if (!ring)
838 return;
839
840 dev = ndev_to_dev(ring->ndev);
841 pdata = netdev_priv(ring->ndev);
842
843 if (ring->desc_addr) {
844 pdata->ring_ops->clear(ring);
845 dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
846 }
847 devm_kfree(dev, ring);
848 }
849
850 static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
851 {
852 struct device *dev = &pdata->pdev->dev;
853 struct xgene_enet_desc_ring *ring;
854 int i;
855
856 for (i = 0; i < pdata->txq_cnt; i++) {
857 ring = pdata->tx_ring[i];
858 if (ring) {
859 if (ring->cp_ring && ring->cp_ring->cp_skb)
860 devm_kfree(dev, ring->cp_ring->cp_skb);
861 if (ring->cp_ring && pdata->cq_cnt)
862 xgene_enet_free_desc_ring(ring->cp_ring);
863 xgene_enet_free_desc_ring(ring);
864 }
865 }
866
867 for (i = 0; i < pdata->rxq_cnt; i++) {
868 ring = pdata->rx_ring[i];
869 if (ring) {
870 if (ring->buf_pool) {
871 if (ring->buf_pool->rx_skb)
872 devm_kfree(dev, ring->buf_pool->rx_skb);
873 xgene_enet_free_desc_ring(ring->buf_pool);
874 }
875 xgene_enet_free_desc_ring(ring);
876 }
877 }
878 }
879
880 static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
881 struct xgene_enet_desc_ring *ring)
882 {
883 if ((pdata->enet_id == XGENE_ENET2) &&
884 (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
885 return true;
886 }
887
888 return false;
889 }
890
891 static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
892 struct xgene_enet_desc_ring *ring)
893 {
894 u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
895
896 return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
897 }
898
899 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
900 struct net_device *ndev, u32 ring_num,
901 enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
902 {
903 struct xgene_enet_desc_ring *ring;
904 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
905 struct device *dev = ndev_to_dev(ndev);
906 int size;
907
908 size = xgene_enet_get_ring_size(dev, cfgsize);
909 if (size < 0)
910 return NULL;
911
912 ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
913 GFP_KERNEL);
914 if (!ring)
915 return NULL;
916
917 ring->ndev = ndev;
918 ring->num = ring_num;
919 ring->cfgsize = cfgsize;
920 ring->id = ring_id;
921
922 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
923 GFP_KERNEL);
924 if (!ring->desc_addr) {
925 devm_kfree(dev, ring);
926 return NULL;
927 }
928 ring->size = size;
929
930 if (is_irq_mbox_required(pdata, ring)) {
931 ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
932 &ring->irq_mbox_dma, GFP_KERNEL);
933 if (!ring->irq_mbox_addr) {
934 dma_free_coherent(dev, size, ring->desc_addr,
935 ring->dma);
936 devm_kfree(dev, ring);
937 return NULL;
938 }
939 }
940
941 ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
942 ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
943 ring = pdata->ring_ops->setup(ring);
944 netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
945 ring->num, ring->size, ring->id, ring->slots);
946
947 return ring;
948 }
949
950 static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
951 {
952 return (owner << 6) | (bufnum & GENMASK(5, 0));
953 }
954
955 static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
956 {
957 enum xgene_ring_owner owner;
958
959 if (p->enet_id == XGENE_ENET1) {
960 switch (p->phy_mode) {
961 case PHY_INTERFACE_MODE_SGMII:
962 owner = RING_OWNER_ETH0;
963 break;
964 default:
965 owner = (!p->port_id) ? RING_OWNER_ETH0 :
966 RING_OWNER_ETH1;
967 break;
968 }
969 } else {
970 owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
971 }
972
973 return owner;
974 }
975
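/* Create the rx rings (each with its own buffer pool) and the tx rings
 * with their completion rings; when cq_cnt is zero the rx ring doubles as
 * the tx-completion ring.
 */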
976 static int xgene_enet_create_desc_rings(struct net_device *ndev)
977 {
978 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
979 struct device *dev = ndev_to_dev(ndev);
980 struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
981 struct xgene_enet_desc_ring *buf_pool = NULL;
982 enum xgene_ring_owner owner;
983 dma_addr_t dma_exp_bufs;
984 u8 cpu_bufnum = pdata->cpu_bufnum;
985 u8 eth_bufnum = pdata->eth_bufnum;
986 u8 bp_bufnum = pdata->bp_bufnum;
987 u16 ring_num = pdata->ring_num;
988 u16 ring_id;
989 int i, ret, size;
990
991 for (i = 0; i < pdata->rxq_cnt; i++) {
992 /* allocate rx descriptor ring */
993 owner = xgene_derive_ring_owner(pdata);
994 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
995 rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
996 RING_CFGSIZE_16KB,
997 ring_id);
998 if (!rx_ring) {
999 ret = -ENOMEM;
1000 goto err;
1001 }
1002
1003 /* allocate buffer pool for receiving packets */
1004 owner = xgene_derive_ring_owner(pdata);
1005 ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
1006 buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
1007 RING_CFGSIZE_2KB,
1008 ring_id);
1009 if (!buf_pool) {
1010 ret = -ENOMEM;
1011 goto err;
1012 }
1013
1014 rx_ring->nbufpool = NUM_BUFPOOL;
1015 rx_ring->buf_pool = buf_pool;
1016 rx_ring->irq = pdata->irqs[i];
1017 if (!pdata->cq_cnt) {
1018 snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
1019 ndev->name);
1020 } else {
1021 snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
1022 ndev->name, i);
1023 }
1024 buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
1025 sizeof(struct sk_buff *),
1026 GFP_KERNEL);
1027 if (!buf_pool->rx_skb) {
1028 ret = -ENOMEM;
1029 goto err;
1030 }
1031
1032 buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
1033 rx_ring->buf_pool = buf_pool;
1034 pdata->rx_ring[i] = rx_ring;
1035 }
1036
1037 for (i = 0; i < pdata->txq_cnt; i++) {
1038 /* allocate tx descriptor ring */
1039 owner = xgene_derive_ring_owner(pdata);
1040 ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
1041 tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1042 RING_CFGSIZE_16KB,
1043 ring_id);
1044 if (!tx_ring) {
1045 ret = -ENOMEM;
1046 goto err;
1047 }
1048
1049 size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
1050 tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
1051 &dma_exp_bufs,
1052 GFP_KERNEL);
1053 if (!tx_ring->exp_bufs) {
1054 ret = -ENOMEM;
1055 goto err;
1056 }
1057
1058 pdata->tx_ring[i] = tx_ring;
1059
1060 if (!pdata->cq_cnt) {
1061 cp_ring = pdata->rx_ring[i];
1062 } else {
1063 /* allocate tx completion descriptor ring */
1064 ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
1065 cpu_bufnum++);
1066 cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
1067 RING_CFGSIZE_16KB,
1068 ring_id);
1069 if (!cp_ring) {
1070 ret = -ENOMEM;
1071 goto err;
1072 }
1073
1074 cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
1075 cp_ring->index = i;
1076 snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
1077 ndev->name, i);
1078 }
1079
1080 cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
1081 sizeof(struct sk_buff *),
1082 GFP_KERNEL);
1083 if (!cp_ring->cp_skb) {
1084 ret = -ENOMEM;
1085 goto err;
1086 }
1087
1088 size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
1089 cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
1090 size, GFP_KERNEL);
1091 if (!cp_ring->frag_dma_addr) {
1092 devm_kfree(dev, cp_ring->cp_skb);
1093 ret = -ENOMEM;
1094 goto err;
1095 }
1096
1097 tx_ring->cp_ring = cp_ring;
1098 tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
1099 }
1100
1101 pdata->ring_ops->coalesce(pdata->tx_ring[0]);
1102 pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;
1103
1104 return 0;
1105
1106 err:
1107 xgene_enet_free_desc_rings(pdata);
1108 return ret;
1109 }
1110
1111 static struct rtnl_link_stats64 *xgene_enet_get_stats64(
1112 struct net_device *ndev,
1113 struct rtnl_link_stats64 *storage)
1114 {
1115 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1116 struct rtnl_link_stats64 *stats = &pdata->stats;
1117
1118 stats->rx_errors += stats->rx_length_errors +
1119 stats->rx_crc_errors +
1120 stats->rx_frame_errors +
1121 stats->rx_fifo_errors;
1122 memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
1123
1124 return storage;
1125 }
1126
1127 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
1128 {
1129 struct xgene_enet_pdata *pdata = netdev_priv(ndev);
1130 int ret;
1131
1132 ret = eth_mac_addr(ndev, addr);
1133 if (ret)
1134 return ret;
1135 pdata->mac_ops->set_mac_addr(pdata);
1136
1137 return ret;
1138 }
1139
1140 static const struct net_device_ops xgene_ndev_ops = {
1141 .ndo_open = xgene_enet_open,
1142 .ndo_stop = xgene_enet_close,
1143 .ndo_start_xmit = xgene_enet_start_xmit,
1144 .ndo_tx_timeout = xgene_enet_timeout,
1145 .ndo_get_stats64 = xgene_enet_get_stats64,
1146 .ndo_change_mtu = eth_change_mtu,
1147 .ndo_set_mac_address = xgene_enet_set_mac_address,
1148 };
1149
1150 #ifdef CONFIG_ACPI
1151 static void xgene_get_port_id_acpi(struct device *dev,
1152 struct xgene_enet_pdata *pdata)
1153 {
1154 acpi_status status;
1155 u64 temp;
1156
1157 status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
1158 if (ACPI_FAILURE(status)) {
1159 pdata->port_id = 0;
1160 } else {
1161 pdata->port_id = temp;
1162 }
1163
1164 return;
1165 }
1166 #endif
1167
1168 static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
1169 {
1170 u32 id = 0;
1171
1172 of_property_read_u32(dev->of_node, "port-id", &id);
1173
1174 pdata->port_id = id & BIT(0);
1175
1176 return;
1177 }
1178
1179 static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
1180 {
1181 struct device *dev = &pdata->pdev->dev;
1182 int delay, ret;
1183
1184 ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
1185 if (ret) {
1186 pdata->tx_delay = 4;
1187 return 0;
1188 }
1189
1190 if (delay < 0 || delay > 7) {
1191 dev_err(dev, "Invalid tx-delay specified\n");
1192 return -EINVAL;
1193 }
1194
1195 pdata->tx_delay = delay;
1196
1197 return 0;
1198 }
1199
1200 static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
1201 {
1202 struct device *dev = &pdata->pdev->dev;
1203 int delay, ret;
1204
1205 ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
1206 if (ret) {
1207 pdata->rx_delay = 2;
1208 return 0;
1209 }
1210
1211 if (delay < 0 || delay > 7) {
1212 dev_err(dev, "Invalid rx-delay specified\n");
1213 return -EINVAL;
1214 }
1215
1216 pdata->rx_delay = delay;
1217
1218 return 0;
1219 }
1220
1221 static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
1222 {
1223 struct platform_device *pdev = pdata->pdev;
1224 struct device *dev = &pdev->dev;
1225 int i, ret, max_irqs;
1226
1227 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1228 max_irqs = 1;
1229 else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
1230 max_irqs = 2;
1231 else
1232 max_irqs = XGENE_MAX_ENET_IRQ;
1233
1234 for (i = 0; i < max_irqs; i++) {
1235 ret = platform_get_irq(pdev, i);
1236 if (ret <= 0) {
1237 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1238 max_irqs = i;
1239 pdata->rxq_cnt = max_irqs / 2;
1240 pdata->txq_cnt = max_irqs / 2;
1241 pdata->cq_cnt = max_irqs / 2;
1242 break;
1243 }
1244 dev_err(dev, "Unable to get ENET IRQ\n");
1245 ret = ret ? : -ENXIO;
1246 return ret;
1247 }
1248 pdata->irqs[i] = ret;
1249 }
1250
1251 return 0;
1252 }
1253
1254 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1255 {
1256 struct platform_device *pdev;
1257 struct net_device *ndev;
1258 struct device *dev;
1259 struct resource *res;
1260 void __iomem *base_addr;
1261 u32 offset;
1262 int ret = 0;
1263
1264 pdev = pdata->pdev;
1265 dev = &pdev->dev;
1266 ndev = pdata->ndev;
1267
1268 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
1269 if (!res) {
1270 dev_err(dev, "Resource enet_csr not defined\n");
1271 return -ENODEV;
1272 }
1273 pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
1274 if (!pdata->base_addr) {
1275 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
1276 return -ENOMEM;
1277 }
1278
1279 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
1280 if (!res) {
1281 dev_err(dev, "Resource ring_csr not defined\n");
1282 return -ENODEV;
1283 }
1284 pdata->ring_csr_addr = devm_ioremap(dev, res->start,
1285 resource_size(res));
1286 if (!pdata->ring_csr_addr) {
1287 dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
1288 return -ENOMEM;
1289 }
1290
1291 res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
1292 if (!res) {
1293 dev_err(dev, "Resource ring_cmd not defined\n");
1294 return -ENODEV;
1295 }
1296 pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
1297 resource_size(res));
1298 if (!pdata->ring_cmd_addr) {
1299 dev_err(dev, "Unable to retrieve ENET Ring command region\n");
1300 return -ENOMEM;
1301 }
1302
1303 if (dev->of_node)
1304 xgene_get_port_id_dt(dev, pdata);
1305 #ifdef CONFIG_ACPI
1306 else
1307 xgene_get_port_id_acpi(dev, pdata);
1308 #endif
1309
1310 if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
1311 eth_hw_addr_random(ndev);
1312
1313 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
1314
1315 pdata->phy_mode = device_get_phy_mode(dev);
1316 if (pdata->phy_mode < 0) {
1317 dev_err(dev, "Unable to get phy-connection-type\n");
1318 return pdata->phy_mode;
1319 }
1320 if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
1321 pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
1322 pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
1323 dev_err(dev, "Incorrect phy-connection-type specified\n");
1324 return -ENODEV;
1325 }
1326
1327 ret = xgene_get_tx_delay(pdata);
1328 if (ret)
1329 return ret;
1330
1331 ret = xgene_get_rx_delay(pdata);
1332 if (ret)
1333 return ret;
1334
1335 ret = xgene_enet_get_irqs(pdata);
1336 if (ret)
1337 return ret;
1338
1339 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1340 if (IS_ERR(pdata->clk)) {
1341 /* Firmware may have set up the clock already. */
1342 dev_info(dev, "clocks have been setup already\n");
1343 }
1344
1345 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
1346 base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
1347 else
1348 base_addr = pdata->base_addr;
1349 pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
1350 pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
1351 pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
1352 pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
1353 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
1354 pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1355 pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
1356 offset = (pdata->enet_id == XGENE_ENET1) ?
1357 BLOCK_ETH_MAC_CSR_OFFSET :
1358 X2_BLOCK_ETH_MAC_CSR_OFFSET;
1359 pdata->mcx_mac_csr_addr = base_addr + offset;
1360 } else {
1361 pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
1362 pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
1363 }
1364 pdata->rx_buff_cnt = NUM_PKT_BUF;
1365
1366 return 0;
1367 }
1368
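/* Reset the port, create the descriptor rings, seed the buffer pools and
 * program either the pre-classifier tree (XGMII) or classifier bypass,
 * then initialize the MAC.
 */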
1369 static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
1370 {
1371 struct xgene_enet_cle *enet_cle = &pdata->cle;
1372 struct net_device *ndev = pdata->ndev;
1373 struct xgene_enet_desc_ring *buf_pool;
1374 u16 dst_ring_num;
1375 int i, ret;
1376
1377 ret = pdata->port_ops->reset(pdata);
1378 if (ret)
1379 return ret;
1380
1381 ret = xgene_enet_create_desc_rings(ndev);
1382 if (ret) {
1383 netdev_err(ndev, "Error in ring configuration\n");
1384 return ret;
1385 }
1386
1387 /* setup buffer pool */
1388 for (i = 0; i < pdata->rxq_cnt; i++) {
1389 buf_pool = pdata->rx_ring[i]->buf_pool;
1390 xgene_enet_init_bufpool(buf_pool);
1391 ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
1392 if (ret) {
1393 xgene_enet_delete_desc_rings(pdata);
1394 return ret;
1395 }
1396 }
1397
1398 dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
1399 buf_pool = pdata->rx_ring[0]->buf_pool;
1400 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1401 /* Initialize and Enable PreClassifier Tree */
1402 enet_cle->max_nodes = 512;
1403 enet_cle->max_dbptrs = 1024;
1404 enet_cle->parsers = 3;
1405 enet_cle->active_parser = PARSER_ALL;
1406 enet_cle->ptree.start_node = 0;
1407 enet_cle->ptree.start_dbptr = 0;
1408 enet_cle->jump_bytes = 8;
1409 ret = pdata->cle_ops->cle_init(pdata);
1410 if (ret) {
1411 netdev_err(ndev, "Preclass Tree init error\n");
1412 return ret;
1413 }
1414 } else {
1415 pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
1416 }
1417
1418 pdata->mac_ops->init(pdata);
1419
1420 return ret;
1421 }
1422
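/* Pick MAC/port/ring operations and queue counts from the PHY interface
 * mode, then assign per-port buffer and ring numbering based on enet_id
 * and port_id.
 */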
1423 static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
1424 {
1425 switch (pdata->phy_mode) {
1426 case PHY_INTERFACE_MODE_RGMII:
1427 pdata->mac_ops = &xgene_gmac_ops;
1428 pdata->port_ops = &xgene_gport_ops;
1429 pdata->rm = RM3;
1430 pdata->rxq_cnt = 1;
1431 pdata->txq_cnt = 1;
1432 pdata->cq_cnt = 0;
1433 break;
1434 case PHY_INTERFACE_MODE_SGMII:
1435 pdata->mac_ops = &xgene_sgmac_ops;
1436 pdata->port_ops = &xgene_sgport_ops;
1437 pdata->rm = RM1;
1438 pdata->rxq_cnt = 1;
1439 pdata->txq_cnt = 1;
1440 pdata->cq_cnt = 1;
1441 break;
1442 default:
1443 pdata->mac_ops = &xgene_xgmac_ops;
1444 pdata->port_ops = &xgene_xgport_ops;
1445 pdata->cle_ops = &xgene_cle3in_ops;
1446 pdata->rm = RM0;
1447 if (!pdata->rxq_cnt) {
1448 pdata->rxq_cnt = XGENE_NUM_RX_RING;
1449 pdata->txq_cnt = XGENE_NUM_TX_RING;
1450 pdata->cq_cnt = XGENE_NUM_TXC_RING;
1451 }
1452 break;
1453 }
1454
1455 if (pdata->enet_id == XGENE_ENET1) {
1456 switch (pdata->port_id) {
1457 case 0:
1458 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1459 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1460 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1461 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1462 pdata->ring_num = START_RING_NUM_0;
1463 } else {
1464 pdata->cpu_bufnum = START_CPU_BUFNUM_0;
1465 pdata->eth_bufnum = START_ETH_BUFNUM_0;
1466 pdata->bp_bufnum = START_BP_BUFNUM_0;
1467 pdata->ring_num = START_RING_NUM_0;
1468 }
1469 break;
1470 case 1:
1471 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1472 pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
1473 pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
1474 pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
1475 pdata->ring_num = XG_START_RING_NUM_1;
1476 } else {
1477 pdata->cpu_bufnum = START_CPU_BUFNUM_1;
1478 pdata->eth_bufnum = START_ETH_BUFNUM_1;
1479 pdata->bp_bufnum = START_BP_BUFNUM_1;
1480 pdata->ring_num = START_RING_NUM_1;
1481 }
1482 break;
1483 default:
1484 break;
1485 }
1486 pdata->ring_ops = &xgene_ring1_ops;
1487 } else {
1488 switch (pdata->port_id) {
1489 case 0:
1490 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
1491 pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
1492 pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
1493 pdata->ring_num = X2_START_RING_NUM_0;
1494 break;
1495 case 1:
1496 pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
1497 pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
1498 pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
1499 pdata->ring_num = X2_START_RING_NUM_1;
1500 break;
1501 default:
1502 break;
1503 }
1504 pdata->rm = RM0;
1505 pdata->ring_ops = &xgene_ring2_ops;
1506 }
1507 }
1508
1509 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
1510 {
1511 struct napi_struct *napi;
1512 int i;
1513
1514 for (i = 0; i < pdata->rxq_cnt; i++) {
1515 napi = &pdata->rx_ring[i]->napi;
1516 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1517 NAPI_POLL_WEIGHT);
1518 }
1519
1520 for (i = 0; i < pdata->cq_cnt; i++) {
1521 napi = &pdata->tx_ring[i]->cp_ring->napi;
1522 netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
1523 NAPI_POLL_WEIGHT);
1524 }
1525 }
1526
1527 static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
1528 {
1529 struct napi_struct *napi;
1530 int i;
1531
1532 for (i = 0; i < pdata->rxq_cnt; i++) {
1533 napi = &pdata->rx_ring[i]->napi;
1534 netif_napi_del(napi);
1535 }
1536
1537 for (i = 0; i < pdata->cq_cnt; i++) {
1538 napi = &pdata->tx_ring[i]->cp_ring->napi;
1539 netif_napi_del(napi);
1540 }
1541 }
1542
1543 static int xgene_enet_probe(struct platform_device *pdev)
1544 {
1545 struct net_device *ndev;
1546 struct xgene_enet_pdata *pdata;
1547 struct device *dev = &pdev->dev;
1548 const struct xgene_mac_ops *mac_ops;
1549 const struct of_device_id *of_id;
1550 int ret;
1551
1552 ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
1553 XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
1554 if (!ndev)
1555 return -ENOMEM;
1556
1557 pdata = netdev_priv(ndev);
1558
1559 pdata->pdev = pdev;
1560 pdata->ndev = ndev;
1561 SET_NETDEV_DEV(ndev, dev);
1562 platform_set_drvdata(pdev, pdata);
1563 ndev->netdev_ops = &xgene_ndev_ops;
1564 xgene_enet_set_ethtool_ops(ndev);
1565 ndev->features |= NETIF_F_IP_CSUM |
1566 NETIF_F_GSO |
1567 NETIF_F_GRO |
1568 NETIF_F_SG;
1569
1570 of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
1571 if (of_id) {
1572 pdata->enet_id = (enum xgene_enet_id)of_id->data;
1573 }
1574 #ifdef CONFIG_ACPI
1575 else {
1576 const struct acpi_device_id *acpi_id;
1577
1578 acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
1579 if (acpi_id)
1580 pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
1581 }
1582 #endif
1583 if (!pdata->enet_id) {
1584 free_netdev(ndev);
1585 return -ENODEV;
1586 }
1587
1588 ret = xgene_enet_get_resources(pdata);
1589 if (ret)
1590 goto err;
1591
1592 xgene_enet_setup_ops(pdata);
1593
1594 if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
1595 ndev->features |= NETIF_F_TSO;
1596 pdata->mss = XGENE_ENET_MSS;
1597 }
1598 ndev->hw_features = ndev->features;
1599
1600 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
1601 if (ret) {
1602 netdev_err(ndev, "No usable DMA configuration\n");
1603 goto err;
1604 }
1605
1606 ret = register_netdev(ndev);
1607 if (ret) {
1608 netdev_err(ndev, "Failed to register netdev\n");
1609 goto err;
1610 }
1611
1612 ret = xgene_enet_init_hw(pdata);
1613 if (ret)
1614 goto err_netdev;
1615
1616 mac_ops = pdata->mac_ops;
1617 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
1618 ret = xgene_enet_mdio_config(pdata);
1619 if (ret)
1620 goto err_netdev;
1621 } else {
1622 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
1623 }
1624
1625 xgene_enet_napi_add(pdata);
1626 return 0;
1627 err_netdev:
1628 unregister_netdev(ndev);
1629 err:
1630 free_netdev(ndev);
1631 return ret;
1632 }
1633
1634 static int xgene_enet_remove(struct platform_device *pdev)
1635 {
1636 struct xgene_enet_pdata *pdata;
1637 const struct xgene_mac_ops *mac_ops;
1638 struct net_device *ndev;
1639
1640 pdata = platform_get_drvdata(pdev);
1641 mac_ops = pdata->mac_ops;
1642 ndev = pdata->ndev;
1643
1644 mac_ops->rx_disable(pdata);
1645 mac_ops->tx_disable(pdata);
1646
1647 xgene_enet_napi_del(pdata);
1648 if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
1649 xgene_enet_mdio_remove(pdata);
1650 unregister_netdev(ndev);
1651 xgene_enet_delete_desc_rings(pdata);
1652 pdata->port_ops->shutdown(pdata);
1653 free_netdev(ndev);
1654
1655 return 0;
1656 }
1657
1658 #ifdef CONFIG_ACPI
1659 static const struct acpi_device_id xgene_enet_acpi_match[] = {
1660 { "APMC0D05", XGENE_ENET1},
1661 { "APMC0D30", XGENE_ENET1},
1662 { "APMC0D31", XGENE_ENET1},
1663 { "APMC0D3F", XGENE_ENET1},
1664 { "APMC0D26", XGENE_ENET2},
1665 { "APMC0D25", XGENE_ENET2},
1666 { }
1667 };
1668 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
1669 #endif
1670
1671 #ifdef CONFIG_OF
1672 static const struct of_device_id xgene_enet_of_match[] = {
1673 {.compatible = "apm,xgene-enet", .data = (void *)XGENE_ENET1},
1674 {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
1675 {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
1676 {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
1677 {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
1678 {},
1679 };
1680
1681 MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
1682 #endif
1683
1684 static struct platform_driver xgene_enet_driver = {
1685 .driver = {
1686 .name = "xgene-enet",
1687 .of_match_table = of_match_ptr(xgene_enet_of_match),
1688 .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
1689 },
1690 .probe = xgene_enet_probe,
1691 .remove = xgene_enet_remove,
1692 };
1693
1694 module_platform_driver(xgene_enet_driver);
1695
1696 MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
1697 MODULE_VERSION(XGENE_DRV_VERSION);
1698 MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
1699 MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
1700 MODULE_LICENSE("GPL");