/* Renesas Ethernet AVB device driver
 *
 * Copyright (C) 2014-2015 Renesas Electronics Corporation
 * Copyright (C) 2015 Renesas Solutions Corp.
 * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
 *
 * Based on the SuperH Ethernet driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

#include <linux/cache.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/div64.h>

#include "ravb.h"

#define RAVB_DEF_MSG_ENABLE \
        (NETIF_MSG_LINK   | \
         NETIF_MSG_TIMER  | \
         NETIF_MSG_RX_ERR | \
         NETIF_MSG_TX_ERR)

int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
{
    int i;

    for (i = 0; i < 10000; i++) {
        if ((ravb_read(ndev, reg) & mask) == value)
            return 0;
        udelay(10);
    }
    return -ETIMEDOUT;
}

static int ravb_config(struct net_device *ndev)
{
    int error;

    /* Set config mode */
    ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
               CCC);
    /* Check if the operating mode is changed to the config mode */
    error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
    if (error)
        netdev_err(ndev, "failed to switch device to config mode\n");

    return error;
}

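/* Update the duplex bit in the E-MAC mode register (ECMR) to match priv->duplex */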
static void ravb_set_duplex(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    u32 ecmr = ravb_read(ndev, ECMR);

    if (priv->duplex)   /* Full */
        ecmr |=  ECMR_DM;
    else                /* Half */
        ecmr &= ~ECMR_DM;
    ravb_write(ndev, ecmr, ECMR);
}

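/* Program the E-MAC speed setting (GECMR) from the current link speed */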
static void ravb_set_rate(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);

    switch (priv->speed) {
    case 100:       /* 100BASE */
        ravb_write(ndev, GECMR_SPEED_100, GECMR);
        break;
    case 1000:      /* 1000BASE */
        ravb_write(ndev, GECMR_SPEED_1000, GECMR);
        break;
    default:
        break;
    }
}

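/* Align the skb data pointer to the RAVB_ALIGN boundary expected by the DMAC */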
static void ravb_set_buffer_align(struct sk_buff *skb)
{
    u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

    if (reserve)
        skb_reserve(skb, RAVB_ALIGN - reserve);
}

/* Get MAC address from the MAC address registers
 *
 * Ethernet AVB device doesn't have ROM for MAC address.
 * This function gets the MAC address that was used by a bootloader.
 */
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
    if (mac) {
        ether_addr_copy(ndev->dev_addr, mac);
    } else {
        u32 mahr = ravb_read(ndev, MAHR);
        u32 malr = ravb_read(ndev, MALR);

        ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
        ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
        ndev->dev_addr[2] = (mahr >>  8) & 0xFF;
        ndev->dev_addr[3] = (mahr >>  0) & 0xFF;
        ndev->dev_addr[4] = (malr >>  8) & 0xFF;
        ndev->dev_addr[5] = (malr >>  0) & 0xFF;
    }
}

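/* MDIO bus bit-banging: set or clear one bit in the PHY interface register (PIR) */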
static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
{
    struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                             mdiobb);
    u32 pir = ravb_read(priv->ndev, PIR);

    if (set)
        pir |=  mask;
    else
        pir &= ~mask;
    ravb_write(priv->ndev, pir, PIR);
}

/* MDC pin control */
static void ravb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
    ravb_mdio_ctrl(ctrl, PIR_MDC, level);
}

/* Data I/O pin control */
static void ravb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
    ravb_mdio_ctrl(ctrl, PIR_MMD, output);
}

static void ravb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
    ravb_mdio_ctrl(ctrl, PIR_MDO, value);
}

static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
    struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                             mdiobb);

    return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0;
}

/* MDIO bus control struct */
static struct mdiobb_ops bb_ops = {
    .owner = THIS_MODULE,
    .set_mdc = ravb_set_mdc,
    .set_mdio_dir = ravb_set_mdio_dir,
    .set_mdio_data = ravb_set_mdio_data,
    .get_mdio_data = ravb_get_mdio_data,
};

/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int ring_size;
    int i;

    /* Free RX skb ringbuffer */
    if (priv->rx_skb[q]) {
        for (i = 0; i < priv->num_rx_ring[q]; i++)
            dev_kfree_skb(priv->rx_skb[q][i]);
    }
    kfree(priv->rx_skb[q]);
    priv->rx_skb[q] = NULL;

    /* Free TX skb ringbuffer */
    if (priv->tx_skb[q]) {
        for (i = 0; i < priv->num_tx_ring[q]; i++)
            dev_kfree_skb(priv->tx_skb[q][i]);
    }
    kfree(priv->tx_skb[q]);
    priv->tx_skb[q] = NULL;

    /* Free aligned TX buffers */
    kfree(priv->tx_align[q]);
    priv->tx_align[q] = NULL;

    if (priv->rx_ring[q]) {
        ring_size = sizeof(struct ravb_ex_rx_desc) *
                    (priv->num_rx_ring[q] + 1);
        dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
                          priv->rx_desc_dma[q]);
        priv->rx_ring[q] = NULL;
    }

    if (priv->tx_ring[q]) {
        ring_size = sizeof(struct ravb_tx_desc) *
                    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
        dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
                          priv->tx_desc_dma[q]);
        priv->tx_ring[q] = NULL;
    }
}

/* Format skb and descriptor buffer for Ethernet AVB */
static void ravb_ring_format(struct net_device *ndev, int q)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct ravb_ex_rx_desc *rx_desc;
    struct ravb_tx_desc *tx_desc;
    struct ravb_desc *desc;
    int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
    int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
                       NUM_TX_DESC;
    dma_addr_t dma_addr;
    int i;

    priv->cur_rx[q] = 0;
    priv->cur_tx[q] = 0;
    priv->dirty_rx[q] = 0;
    priv->dirty_tx[q] = 0;

    memset(priv->rx_ring[q], 0, rx_ring_size);
    /* Build RX ring buffer */
    for (i = 0; i < priv->num_rx_ring[q]; i++) {
        rx_desc = &priv->rx_ring[q][i];
        /* The size of the buffer should be on 16-byte boundary. */
        rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
        dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
                                  ALIGN(PKT_BUF_SZ, 16),
                                  DMA_FROM_DEVICE);
        /* We just set the data size to 0 for a failed mapping which
         * should prevent DMA from happening...
         */
        if (dma_mapping_error(ndev->dev.parent, dma_addr))
            rx_desc->ds_cc = cpu_to_le16(0);
        rx_desc->dptr = cpu_to_le32(dma_addr);
        rx_desc->die_dt = DT_FEMPTY;
    }
    rx_desc = &priv->rx_ring[q][i];
    rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
    rx_desc->die_dt = DT_LINKFIX; /* type */

    memset(priv->tx_ring[q], 0, tx_ring_size);
    /* Build TX ring buffer */
    for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
         i++, tx_desc++) {
        tx_desc->die_dt = DT_EEMPTY;
        tx_desc++;
        tx_desc->die_dt = DT_EEMPTY;
    }
    tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
    tx_desc->die_dt = DT_LINKFIX; /* type */

    /* RX descriptor base address for best effort */
    desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
    desc->die_dt = DT_LINKFIX; /* type */
    desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);

    /* TX descriptor base address for best effort */
    desc = &priv->desc_bat[q];
    desc->die_dt = DT_LINKFIX; /* type */
    desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}

/* Init skb and descriptor buffer for Ethernet AVB */
static int ravb_ring_init(struct net_device *ndev, int q)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct sk_buff *skb;
    int ring_size;
    int i;

    /* Allocate RX and TX skb rings */
    priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                              sizeof(*priv->rx_skb[q]), GFP_KERNEL);
    priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
                              sizeof(*priv->tx_skb[q]), GFP_KERNEL);
    if (!priv->rx_skb[q] || !priv->tx_skb[q])
        goto error;

    for (i = 0; i < priv->num_rx_ring[q]; i++) {
        skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
        if (!skb)
            goto error;
        ravb_set_buffer_align(skb);
        priv->rx_skb[q][i] = skb;
    }

    /* Allocate rings for the aligned buffers */
    priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
                                DPTR_ALIGN - 1, GFP_KERNEL);
    if (!priv->tx_align[q])
        goto error;

    /* Allocate all RX descriptors. */
    ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
    priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                          &priv->rx_desc_dma[q],
                                          GFP_KERNEL);
    if (!priv->rx_ring[q])
        goto error;

    priv->dirty_rx[q] = 0;

    /* Allocate all TX descriptors. */
    ring_size = sizeof(struct ravb_tx_desc) *
                (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
    priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
                                          &priv->tx_desc_dma[q],
                                          GFP_KERNEL);
    if (!priv->tx_ring[q])
        goto error;

    return 0;

error:
    ravb_ring_free(ndev, q);

    return -ENOMEM;
}

/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);

    /* Receive frame limit set register */
    ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);

    /* PAUSE prohibition */
    ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
               ECMR_TE | ECMR_RE, ECMR);

    ravb_set_rate(ndev);

    /* Set MAC address */
    ravb_write(ndev,
               (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
               (ndev->dev_addr[2] << 8)  | (ndev->dev_addr[3]), MAHR);
    ravb_write(ndev,
               (ndev->dev_addr[4] << 8)  | (ndev->dev_addr[5]), MALR);

    ravb_write(ndev, 1, MPR);

    /* E-MAC status register clear */
    ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);

    /* E-MAC interrupt enable register */
    ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}

/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
    int error;

    /* Set CONFIG mode */
    error = ravb_config(ndev);
    if (error)
        return error;

    error = ravb_ring_init(ndev, RAVB_BE);
    if (error)
        return error;
    error = ravb_ring_init(ndev, RAVB_NC);
    if (error) {
        ravb_ring_free(ndev, RAVB_BE);
        return error;
    }

    /* Descriptor format */
    ravb_ring_format(ndev, RAVB_BE);
    ravb_ring_format(ndev, RAVB_NC);

#if defined(__LITTLE_ENDIAN)
    ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
#else
    ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
#endif

    ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);

    ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);

    /* Timestamp enable */
    ravb_write(ndev, TCCR_TFEN, TCCR);

    /* Interrupt init: */
    ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
    /* Disable FIFO full warning */
    ravb_write(ndev, 0, RIC1);
    /* Receive FIFO full error, descriptor empty */
    ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
    /* Frame transmitted, timestamp FIFO updated */
    ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);

    /* Setting the control will start the AVB-DMAC process. */
    ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
               CCC);

    return 0;
}

/* Free TX skb function for AVB-IP */
static int ravb_tx_free(struct net_device *ndev, int q)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct net_device_stats *stats = &priv->stats[q];
    struct ravb_tx_desc *desc;
    int free_num = 0;
    int entry;
    u32 size;

    for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
        entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
                                     NUM_TX_DESC);
        desc = &priv->tx_ring[q][entry];
        if (desc->die_dt != DT_FEMPTY)
            break;
        /* Descriptor type must be checked before all other reads */
        dma_rmb();
        size = le16_to_cpu(desc->ds_tagl) & TX_DS;
        /* Free the original skb. */
        if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
            dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                             size, DMA_TO_DEVICE);
            /* Last packet descriptor? */
            if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
                entry /= NUM_TX_DESC;
                dev_kfree_skb_any(priv->tx_skb[q][entry]);
                priv->tx_skb[q][entry] = NULL;
                stats->tx_packets++;
            }
            free_num++;
        }
        stats->tx_bytes += size;
        desc->die_dt = DT_EEMPTY;
    }
    return free_num;
}

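/* Pull TX timestamps out of the timestamp FIFO and deliver them to the
 * matching skbs queued on the ts_skb list.
 */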
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct ravb_tstamp_skb *ts_skb, *ts_skb2;
    struct skb_shared_hwtstamps shhwtstamps;
    struct sk_buff *skb;
    struct timespec64 ts;
    u16 tag, tfa_tag;
    int count;
    u32 tfa2;

    count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8;
    while (count--) {
        tfa2 = ravb_read(ndev, TFA2);
        tfa_tag = (tfa2 & TFA2_TST) >> 16;
        ts.tv_nsec = (u64)ravb_read(ndev, TFA0);
        ts.tv_sec = ((u64)(tfa2 & TFA2_TSV) << 32) |
                    ravb_read(ndev, TFA1);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
        list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list,
                                 list) {
            skb = ts_skb->skb;
            tag = ts_skb->tag;
            list_del(&ts_skb->list);
            kfree(ts_skb);
            if (tag == tfa_tag) {
                skb_tstamp_tx(skb, &shhwtstamps);
                break;
            }
        }
        ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
    }
}

/* Packet receive function for Ethernet AVB */
static bool ravb_rx(struct net_device *ndev, int *quota, int q)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
    int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
                   priv->cur_rx[q];
    struct net_device_stats *stats = &priv->stats[q];
    struct ravb_ex_rx_desc *desc;
    struct sk_buff *skb;
    dma_addr_t dma_addr;
    struct timespec64 ts;
    u8 desc_status;
    u16 pkt_len;
    int limit;

    boguscnt = min(boguscnt, *quota);
    limit = boguscnt;
    desc = &priv->rx_ring[q][entry];
    while (desc->die_dt != DT_FEMPTY) {
        /* Descriptor type must be checked before all other reads */
        dma_rmb();
        desc_status = desc->msc;
        pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;

        if (--boguscnt < 0)
            break;

        /* We use 0-byte descriptors to mark the DMA mapping errors */
        if (!pkt_len)
            continue;

        if (desc_status & MSC_MC)
            stats->multicast++;

        if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF |
                           MSC_CEEF)) {
            stats->rx_errors++;
            if (desc_status & MSC_CRC)
                stats->rx_crc_errors++;
            if (desc_status & MSC_RFE)
                stats->rx_frame_errors++;
            if (desc_status & (MSC_RTLF | MSC_RTSF))
                stats->rx_length_errors++;
            if (desc_status & MSC_CEEF)
                stats->rx_missed_errors++;
        } else {
            u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;

            skb = priv->rx_skb[q][entry];
            priv->rx_skb[q][entry] = NULL;
            dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                             ALIGN(PKT_BUF_SZ, 16),
                             DMA_FROM_DEVICE);
            get_ts &= (q == RAVB_NC) ?
                      RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                      ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
            if (get_ts) {
                struct skb_shared_hwtstamps *shhwtstamps;

                shhwtstamps = skb_hwtstamps(skb);
                memset(shhwtstamps, 0, sizeof(*shhwtstamps));
                ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) <<
                             32) | le32_to_cpu(desc->ts_sl);
                ts.tv_nsec = le32_to_cpu(desc->ts_n);
                shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
            }
            skb_put(skb, pkt_len);
            skb->protocol = eth_type_trans(skb, ndev);
            napi_gro_receive(&priv->napi[q], skb);
            stats->rx_packets++;
            stats->rx_bytes += pkt_len;
        }

        entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
        desc = &priv->rx_ring[q][entry];
    }

    /* Refill the RX ring buffers. */
    for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
        entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
        desc = &priv->rx_ring[q][entry];
        /* The size of the buffer should be on 16-byte boundary. */
        desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));

        if (!priv->rx_skb[q][entry]) {
            skb = netdev_alloc_skb(ndev,
                                   PKT_BUF_SZ + RAVB_ALIGN - 1);
            if (!skb)
                break;  /* Better luck next round. */
            ravb_set_buffer_align(skb);
            dma_addr = dma_map_single(ndev->dev.parent, skb->data,
                                      le16_to_cpu(desc->ds_cc),
                                      DMA_FROM_DEVICE);
            skb_checksum_none_assert(skb);
            /* We just set the data size to 0 for a failed mapping
             * which should prevent DMA from happening...
             */
            if (dma_mapping_error(ndev->dev.parent, dma_addr))
                desc->ds_cc = cpu_to_le16(0);
            desc->dptr = cpu_to_le32(dma_addr);
            priv->rx_skb[q][entry] = skb;
        }
        /* Descriptor type must be set after all the above writes */
        dma_wmb();
        desc->die_dt = DT_FEMPTY;
    }

    *quota -= limit - (++boguscnt);

    return boguscnt <= 0;
}

static void ravb_rcv_snd_disable(struct net_device *ndev)
{
    /* Disable TX and RX */
    ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
}

static void ravb_rcv_snd_enable(struct net_device *ndev)
{
    /* Enable TX and RX */
    ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
}

/* Wait until the hardware DMA processes have finished */
static int ravb_stop_dma(struct net_device *ndev)
{
    int error;

    /* Wait for stopping the hardware TX process */
    error = ravb_wait(ndev, TCCR,
                      TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
    if (error)
        return error;

    error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3,
                      0);
    if (error)
        return error;

    /* Stop the E-MAC's RX/TX processes. */
    ravb_rcv_snd_disable(ndev);

    /* Wait for stopping the RX DMA process */
    error = ravb_wait(ndev, CSR, CSR_RPO, 0);
    if (error)
        return error;

    /* Stop AVB-DMAC process */
    return ravb_config(ndev);
}

/* E-MAC interrupt handler */
static void ravb_emac_interrupt(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    u32 ecsr, psr;

    ecsr = ravb_read(ndev, ECSR);
    ravb_write(ndev, ecsr, ECSR);   /* clear interrupt */
    if (ecsr & ECSR_ICD)
        ndev->stats.tx_carrier_errors++;
    if (ecsr & ECSR_LCHNG) {
        /* Link changed */
        if (priv->no_avb_link)
            return;
        psr = ravb_read(ndev, PSR);
        if (priv->avb_link_active_low)
            psr ^= PSR_LMON;
        if (!(psr & PSR_LMON)) {
            /* Disable RX and TX */
            ravb_rcv_snd_disable(ndev);
        } else {
            /* Enable RX and TX */
            ravb_rcv_snd_enable(ndev);
        }
    }
}

/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    u32 eis, ris2;

    eis = ravb_read(ndev, EIS);
    ravb_write(ndev, ~EIS_QFS, EIS);
    if (eis & EIS_QFS) {
        ris2 = ravb_read(ndev, RIS2);
        ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);

        /* Receive Descriptor Empty int */
        if (ris2 & RIS2_QFF0)
            priv->stats[RAVB_BE].rx_over_errors++;

        /* Receive Descriptor Empty int */
        if (ris2 & RIS2_QFF1)
            priv->stats[RAVB_NC].rx_over_errors++;

        /* Receive FIFO Overflow int */
        if (ris2 & RIS2_RFFF)
            priv->rx_fifo_errors++;
    }
}

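/* Top-level interrupt handler: dispatches RX/TX, timestamp, E-MAC, error and gPTP events */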
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
    struct net_device *ndev = dev_id;
    struct ravb_private *priv = netdev_priv(ndev);
    irqreturn_t result = IRQ_NONE;
    u32 iss;

    spin_lock(&priv->lock);
    /* Get interrupt status */
    iss = ravb_read(ndev, ISS);

    /* Received and transmitted interrupts */
    if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
        u32 ris0 = ravb_read(ndev, RIS0);
        u32 ric0 = ravb_read(ndev, RIC0);
        u32 tis = ravb_read(ndev, TIS);
        u32 tic = ravb_read(ndev, TIC);
        int q;

        /* Timestamp updated */
        if (tis & TIS_TFUF) {
            ravb_write(ndev, ~TIS_TFUF, TIS);
            ravb_get_tx_tstamp(ndev);
            result = IRQ_HANDLED;
        }

        /* Network control and best effort queue RX/TX */
        for (q = RAVB_NC; q >= RAVB_BE; q--) {
            if (((ris0 & ric0) & BIT(q)) ||
                ((tis & tic) & BIT(q))) {
                if (napi_schedule_prep(&priv->napi[q])) {
                    /* Mask RX and TX interrupts */
                    ric0 &= ~BIT(q);
                    tic &= ~BIT(q);
                    ravb_write(ndev, ric0, RIC0);
                    ravb_write(ndev, tic, TIC);
                    __napi_schedule(&priv->napi[q]);
                } else {
                    netdev_err(ndev,
                               "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
                               ris0, ric0);
                    netdev_err(ndev,
                               " tx status 0x%08x, tx mask 0x%08x.\n",
                               tis, tic);
                }
                result = IRQ_HANDLED;
            }
        }
    }

    /* E-MAC status summary */
    if (iss & ISS_MS) {
        ravb_emac_interrupt(ndev);
        result = IRQ_HANDLED;
    }

    /* Error status summary */
    if (iss & ISS_ES) {
        ravb_error_interrupt(ndev);
        result = IRQ_HANDLED;
    }

    if (iss & ISS_CGIS)
        result = ravb_ptp_interrupt(ndev);

    spin_unlock(&priv->lock);
    return result;
}

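/* NAPI poll handler: processes the RX and TX rings for one queue and
 * re-enables that queue's interrupts once the rings are drained.
 */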
static int ravb_poll(struct napi_struct *napi, int budget)
{
    struct net_device *ndev = napi->dev;
    struct ravb_private *priv = netdev_priv(ndev);
    unsigned long flags;
    int q = napi - priv->napi;
    int mask = BIT(q);
    int quota = budget;
    u32 ris0, tis;

    for (;;) {
        tis = ravb_read(ndev, TIS);
        ris0 = ravb_read(ndev, RIS0);
        if (!((ris0 & mask) || (tis & mask)))
            break;

        /* Processing RX Descriptor Ring */
        if (ris0 & mask) {
            /* Clear RX interrupt */
            ravb_write(ndev, ~mask, RIS0);
            if (ravb_rx(ndev, &quota, q))
                goto out;
        }
        /* Processing TX Descriptor Ring */
        if (tis & mask) {
            spin_lock_irqsave(&priv->lock, flags);
            /* Clear TX interrupt */
            ravb_write(ndev, ~mask, TIS);
            ravb_tx_free(ndev, q);
            netif_wake_subqueue(ndev, q);
            spin_unlock_irqrestore(&priv->lock, flags);
        }
    }

    napi_complete(napi);

    /* Re-enable RX/TX interrupts */
    spin_lock_irqsave(&priv->lock, flags);
    ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
    ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC);
    spin_unlock_irqrestore(&priv->lock, flags);

    /* Receive error message handling */
    priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
    priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
    if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
        ndev->stats.rx_over_errors = priv->rx_over_errors;
        netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
    }
    if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
        ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
        netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
    }
out:
    return budget - quota;
}

/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct phy_device *phydev = priv->phydev;
    bool new_state = false;

    if (phydev->link) {
        if (phydev->duplex != priv->duplex) {
            new_state = true;
            priv->duplex = phydev->duplex;
            ravb_set_duplex(ndev);
        }

        if (phydev->speed != priv->speed) {
            new_state = true;
            priv->speed = phydev->speed;
            ravb_set_rate(ndev);
        }
        if (!priv->link) {
            ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
                       ECMR);
            new_state = true;
            priv->link = phydev->link;
            if (priv->no_avb_link)
                ravb_rcv_snd_enable(ndev);
        }
    } else if (priv->link) {
        new_state = true;
        priv->link = 0;
        priv->speed = 0;
        priv->duplex = -1;
        if (priv->no_avb_link)
            ravb_rcv_snd_disable(ndev);
    }

    if (new_state && netif_msg_link(priv))
        phy_print_status(phydev);
}

/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
    struct device_node *np = ndev->dev.parent->of_node;
    struct ravb_private *priv = netdev_priv(ndev);
    struct phy_device *phydev;
    struct device_node *pn;
    int err;

    priv->link = 0;
    priv->speed = 0;
    priv->duplex = -1;

    /* Try connecting to PHY */
    pn = of_parse_phandle(np, "phy-handle", 0);
    if (!pn) {
        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        if (of_phy_is_fixed_link(np)) {
            err = of_phy_register_fixed_link(np);
            if (err)
                return err;
        }
        pn = of_node_get(np);
    }
    phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
                            priv->phy_interface);
    if (!phydev) {
        netdev_err(ndev, "failed to connect PHY\n");
        return -ENOENT;
    }

    /* This driver only supports 10/100Mbit speeds on Gen3
     * hardware at this time.
     */
    if (priv->chip_id == RCAR_GEN3) {
        err = phy_set_max_speed(phydev, SPEED_100);
        if (err) {
            netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
            phy_disconnect(phydev);
            return err;
        }

        netdev_info(ndev, "limited PHY to 100Mbit/s\n");
    }

    /* 10BASE is not supported */
    phydev->supported &= ~PHY_10BT_FEATURES;

    phy_attached_info(phydev);

    priv->phydev = phydev;

    return 0;
}

/* PHY control start function */
static int ravb_phy_start(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int error;

    error = ravb_phy_init(ndev);
    if (error)
        return error;

    phy_start(priv->phydev);

    return 0;
}

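/* ethtool operations */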
static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int error = -ENODEV;
    unsigned long flags;

    if (priv->phydev) {
        spin_lock_irqsave(&priv->lock, flags);
        error = phy_ethtool_gset(priv->phydev, ecmd);
        spin_unlock_irqrestore(&priv->lock, flags);
    }

    return error;
}

static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
    struct ravb_private *priv = netdev_priv(ndev);
    unsigned long flags;
    int error;

    if (!priv->phydev)
        return -ENODEV;

    spin_lock_irqsave(&priv->lock, flags);

    /* Disable TX and RX */
    ravb_rcv_snd_disable(ndev);

    error = phy_ethtool_sset(priv->phydev, ecmd);
    if (error)
        goto error_exit;

    if (ecmd->duplex == DUPLEX_FULL)
        priv->duplex = 1;
    else
        priv->duplex = 0;

    ravb_set_duplex(ndev);

error_exit:
    mdelay(1);

    /* Enable TX and RX */
    ravb_rcv_snd_enable(ndev);

    spin_unlock_irqrestore(&priv->lock, flags);

    return error;
}

static int ravb_nway_reset(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int error = -ENODEV;
    unsigned long flags;

    if (priv->phydev) {
        spin_lock_irqsave(&priv->lock, flags);
        error = phy_start_aneg(priv->phydev);
        spin_unlock_irqrestore(&priv->lock, flags);
    }

    return error;
}

static u32 ravb_get_msglevel(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);

    return priv->msg_enable;
}

static void ravb_set_msglevel(struct net_device *ndev, u32 value)
{
    struct ravb_private *priv = netdev_priv(ndev);

    priv->msg_enable = value;
}

static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
    "rx_queue_0_current",
    "tx_queue_0_current",
    "rx_queue_0_dirty",
    "tx_queue_0_dirty",
    "rx_queue_0_packets",
    "tx_queue_0_packets",
    "rx_queue_0_bytes",
    "tx_queue_0_bytes",
    "rx_queue_0_mcast_packets",
    "rx_queue_0_errors",
    "rx_queue_0_crc_errors",
    "rx_queue_0_frame_errors",
    "rx_queue_0_length_errors",
    "rx_queue_0_missed_errors",
    "rx_queue_0_over_errors",

    "rx_queue_1_current",
    "tx_queue_1_current",
    "rx_queue_1_dirty",
    "tx_queue_1_dirty",
    "rx_queue_1_packets",
    "tx_queue_1_packets",
    "rx_queue_1_bytes",
    "tx_queue_1_bytes",
    "rx_queue_1_mcast_packets",
    "rx_queue_1_errors",
    "rx_queue_1_crc_errors",
    "rx_queue_1_frame_errors",
    "rx_queue_1_length_errors",
    "rx_queue_1_missed_errors",
    "rx_queue_1_over_errors",
};

#define RAVB_STATS_LEN  ARRAY_SIZE(ravb_gstrings_stats)

static int ravb_get_sset_count(struct net_device *netdev, int sset)
{
    switch (sset) {
    case ETH_SS_STATS:
        return RAVB_STATS_LEN;
    default:
        return -EOPNOTSUPP;
    }
}

static void ravb_get_ethtool_stats(struct net_device *ndev,
                                   struct ethtool_stats *stats, u64 *data)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int i = 0;
    int q;

    /* Device-specific stats */
    for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
        struct net_device_stats *stats = &priv->stats[q];

        data[i++] = priv->cur_rx[q];
        data[i++] = priv->cur_tx[q];
        data[i++] = priv->dirty_rx[q];
        data[i++] = priv->dirty_tx[q];
        data[i++] = stats->rx_packets;
        data[i++] = stats->tx_packets;
        data[i++] = stats->rx_bytes;
        data[i++] = stats->tx_bytes;
        data[i++] = stats->multicast;
        data[i++] = stats->rx_errors;
        data[i++] = stats->rx_crc_errors;
        data[i++] = stats->rx_frame_errors;
        data[i++] = stats->rx_length_errors;
        data[i++] = stats->rx_missed_errors;
        data[i++] = stats->rx_over_errors;
    }
}

static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
    switch (stringset) {
    case ETH_SS_STATS:
        memcpy(data, *ravb_gstrings_stats, sizeof(ravb_gstrings_stats));
        break;
    }
}

static void ravb_get_ringparam(struct net_device *ndev,
                               struct ethtool_ringparam *ring)
{
    struct ravb_private *priv = netdev_priv(ndev);

    ring->rx_max_pending = BE_RX_RING_MAX;
    ring->tx_max_pending = BE_TX_RING_MAX;
    ring->rx_pending = priv->num_rx_ring[RAVB_BE];
    ring->tx_pending = priv->num_tx_ring[RAVB_BE];
}

static int ravb_set_ringparam(struct net_device *ndev,
                              struct ethtool_ringparam *ring)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int error;

    if (ring->tx_pending > BE_TX_RING_MAX ||
        ring->rx_pending > BE_RX_RING_MAX ||
        ring->tx_pending < BE_TX_RING_MIN ||
        ring->rx_pending < BE_RX_RING_MIN)
        return -EINVAL;
    if (ring->rx_mini_pending || ring->rx_jumbo_pending)
        return -EINVAL;

    if (netif_running(ndev)) {
        netif_device_detach(ndev);
        /* Stop PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
            ravb_ptp_stop(ndev);
        /* Wait for DMA stopping */
        error = ravb_stop_dma(ndev);
        if (error) {
            netdev_err(ndev,
                       "cannot set ringparam! Any AVB processes are still running?\n");
            return error;
        }
        synchronize_irq(ndev->irq);

        /* Free all the skb's in the RX queue and the DMA buffers. */
        ravb_ring_free(ndev, RAVB_BE);
        ravb_ring_free(ndev, RAVB_NC);
    }

    /* Set new parameters */
    priv->num_rx_ring[RAVB_BE] = ring->rx_pending;
    priv->num_tx_ring[RAVB_BE] = ring->tx_pending;

    if (netif_running(ndev)) {
        error = ravb_dmac_init(ndev);
        if (error) {
            netdev_err(ndev,
                       "%s: ravb_dmac_init() failed, error %d\n",
                       __func__, error);
            return error;
        }

        ravb_emac_init(ndev);

        /* Initialise PTP Clock driver */
        if (priv->chip_id == RCAR_GEN2)
            ravb_ptp_init(ndev, priv->pdev);

        netif_device_attach(ndev);
    }

    return 0;
}

static int ravb_get_ts_info(struct net_device *ndev,
                            struct ethtool_ts_info *info)
{
    struct ravb_private *priv = netdev_priv(ndev);

    info->so_timestamping =
        SOF_TIMESTAMPING_TX_SOFTWARE |
        SOF_TIMESTAMPING_RX_SOFTWARE |
        SOF_TIMESTAMPING_SOFTWARE |
        SOF_TIMESTAMPING_TX_HARDWARE |
        SOF_TIMESTAMPING_RX_HARDWARE |
        SOF_TIMESTAMPING_RAW_HARDWARE;
    info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
    info->rx_filters =
        (1 << HWTSTAMP_FILTER_NONE) |
        (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
        (1 << HWTSTAMP_FILTER_ALL);
    info->phc_index = ptp_clock_index(priv->ptp.clock);

    return 0;
}

static const struct ethtool_ops ravb_ethtool_ops = {
    .get_settings       = ravb_get_settings,
    .set_settings       = ravb_set_settings,
    .nway_reset         = ravb_nway_reset,
    .get_msglevel       = ravb_get_msglevel,
    .set_msglevel       = ravb_set_msglevel,
    .get_link           = ethtool_op_get_link,
    .get_strings        = ravb_get_strings,
    .get_ethtool_stats  = ravb_get_ethtool_stats,
    .get_sset_count     = ravb_get_sset_count,
    .get_ringparam      = ravb_get_ringparam,
    .set_ringparam      = ravb_set_ringparam,
    .get_ts_info        = ravb_get_ts_info,
};

/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    int error;

    napi_enable(&priv->napi[RAVB_BE]);
    napi_enable(&priv->napi[RAVB_NC]);

    error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
                        ndev);
    if (error) {
        netdev_err(ndev, "cannot request IRQ\n");
        goto out_napi_off;
    }

    if (priv->chip_id == RCAR_GEN3) {
        error = request_irq(priv->emac_irq, ravb_interrupt,
                            IRQF_SHARED, ndev->name, ndev);
        if (error) {
            netdev_err(ndev, "cannot request IRQ\n");
            goto out_free_irq;
        }
    }

    /* Device init */
    error = ravb_dmac_init(ndev);
    if (error)
        goto out_free_irq2;
    ravb_emac_init(ndev);

    /* Initialise PTP Clock driver */
    if (priv->chip_id == RCAR_GEN2)
        ravb_ptp_init(ndev, priv->pdev);

    netif_tx_start_all_queues(ndev);

    /* PHY control start */
    error = ravb_phy_start(ndev);
    if (error)
        goto out_ptp_stop;

    return 0;

out_ptp_stop:
    /* Stop PTP Clock driver */
    if (priv->chip_id == RCAR_GEN2)
        ravb_ptp_stop(ndev);
out_free_irq2:
    if (priv->chip_id == RCAR_GEN3)
        free_irq(priv->emac_irq, ndev);
out_free_irq:
    free_irq(ndev->irq, ndev);
out_napi_off:
    napi_disable(&priv->napi[RAVB_NC]);
    napi_disable(&priv->napi[RAVB_BE]);
    return error;
}

/* Timeout function for Ethernet AVB */
static void ravb_tx_timeout(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);

    netif_err(priv, tx_err, ndev,
              "transmit timed out, status %08x, resetting...\n",
              ravb_read(ndev, ISS));

    /* tx_errors count up */
    ndev->stats.tx_errors++;

    schedule_work(&priv->work);
}

static void ravb_tx_timeout_work(struct work_struct *work)
{
    struct ravb_private *priv = container_of(work, struct ravb_private,
                                             work);
    struct net_device *ndev = priv->ndev;

    netif_tx_stop_all_queues(ndev);

    /* Stop PTP Clock driver */
    if (priv->chip_id == RCAR_GEN2)
        ravb_ptp_stop(ndev);

    /* Wait for DMA stopping */
    ravb_stop_dma(ndev);

    ravb_ring_free(ndev, RAVB_BE);
    ravb_ring_free(ndev, RAVB_NC);

    ravb_dmac_init(ndev);
    ravb_emac_init(ndev);

    /* Initialise PTP Clock driver */
    if (priv->chip_id == RCAR_GEN2)
        ravb_ptp_init(ndev, priv->pdev);

    netif_tx_start_all_queues(ndev);
}

/* Packet transmit function for Ethernet AVB */
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    u16 q = skb_get_queue_mapping(skb);
    struct ravb_tstamp_skb *ts_skb;
    struct ravb_tx_desc *desc;
    unsigned long flags;
    dma_addr_t dma_addr;
    void *buffer;
    u32 entry;
    u32 len;

    spin_lock_irqsave(&priv->lock, flags);
    if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
        NUM_TX_DESC) {
        netif_err(priv, tx_queued, ndev,
                  "still transmitting with the full ring!\n");
        netif_stop_subqueue(ndev, q);
        spin_unlock_irqrestore(&priv->lock, flags);
        return NETDEV_TX_BUSY;
    }
    entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
    priv->tx_skb[q][entry / NUM_TX_DESC] = skb;

    if (skb_put_padto(skb, ETH_ZLEN))
        goto drop;

    buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
             entry / NUM_TX_DESC * DPTR_ALIGN;
    len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
    memcpy(buffer, skb->data, len);
    dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
    if (dma_mapping_error(ndev->dev.parent, dma_addr))
        goto drop;

    desc = &priv->tx_ring[q][entry];
    desc->ds_tagl = cpu_to_le16(len);
    desc->dptr = cpu_to_le32(dma_addr);

    buffer = skb->data + len;
    len = skb->len - len;
    dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
    if (dma_mapping_error(ndev->dev.parent, dma_addr))
        goto unmap;

    desc++;
    desc->ds_tagl = cpu_to_le16(len);
    desc->dptr = cpu_to_le32(dma_addr);

    /* TX timestamp required */
    if (q == RAVB_NC) {
        ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
        if (!ts_skb) {
            desc--;
            dma_unmap_single(ndev->dev.parent, dma_addr, len,
                             DMA_TO_DEVICE);
            goto unmap;
        }
        ts_skb->skb = skb;
        ts_skb->tag = priv->ts_skb_tag++;
        priv->ts_skb_tag &= 0x3ff;
        list_add_tail(&ts_skb->list, &priv->ts_skb_list);

        /* TAG and timestamp required flag */
        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
        skb_tx_timestamp(skb);
        desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
        desc->ds_tagl |= le16_to_cpu(ts_skb->tag << 12);
    }

    /* Descriptor type must be set after all the above writes */
    dma_wmb();
    desc->die_dt = DT_FEND;
    desc--;
    desc->die_dt = DT_FSTART;

    ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);

    priv->cur_tx[q] += NUM_TX_DESC;
    if (priv->cur_tx[q] - priv->dirty_tx[q] >
        (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
        netif_stop_subqueue(ndev, q);

exit:
    spin_unlock_irqrestore(&priv->lock, flags);
    return NETDEV_TX_OK;

unmap:
    dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
                     le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
drop:
    dev_kfree_skb_any(skb);
    priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
    goto exit;
}

static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
    /* If skb needs TX timestamp, it is handled in network control queue */
    return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
                                                           RAVB_BE;
}

static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct net_device_stats *nstats, *stats0, *stats1;

    nstats = &ndev->stats;
    stats0 = &priv->stats[RAVB_BE];
    stats1 = &priv->stats[RAVB_NC];

    nstats->tx_dropped += ravb_read(ndev, TROCR);
    ravb_write(ndev, 0, TROCR);     /* (write clear) */
    nstats->collisions += ravb_read(ndev, CDCR);
    ravb_write(ndev, 0, CDCR);      /* (write clear) */
    nstats->tx_carrier_errors += ravb_read(ndev, LCCR);
    ravb_write(ndev, 0, LCCR);      /* (write clear) */

    nstats->tx_carrier_errors += ravb_read(ndev, CERCR);
    ravb_write(ndev, 0, CERCR);     /* (write clear) */
    nstats->tx_carrier_errors += ravb_read(ndev, CEECR);
    ravb_write(ndev, 0, CEECR);     /* (write clear) */

    nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
    nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
    nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
    nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
    nstats->multicast = stats0->multicast + stats1->multicast;
    nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
    nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
    nstats->rx_frame_errors =
        stats0->rx_frame_errors + stats1->rx_frame_errors;
    nstats->rx_length_errors =
        stats0->rx_length_errors + stats1->rx_length_errors;
    nstats->rx_missed_errors =
        stats0->rx_missed_errors + stats1->rx_missed_errors;
    nstats->rx_over_errors =
        stats0->rx_over_errors + stats1->rx_over_errors;

    return nstats;
}

/* Update promiscuous bit */
static void ravb_set_rx_mode(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    unsigned long flags;
    u32 ecmr;

    spin_lock_irqsave(&priv->lock, flags);
    ecmr = ravb_read(ndev, ECMR);
    if (ndev->flags & IFF_PROMISC)
        ecmr |=  ECMR_PRM;
    else
        ecmr &= ~ECMR_PRM;
    ravb_write(ndev, ecmr, ECMR);
    spin_unlock_irqrestore(&priv->lock, flags);
}

/* Device close function for Ethernet AVB */
static int ravb_close(struct net_device *ndev)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct ravb_tstamp_skb *ts_skb, *ts_skb2;

    netif_tx_stop_all_queues(ndev);

    /* Disable interrupts by clearing the interrupt masks. */
    ravb_write(ndev, 0, RIC0);
    ravb_write(ndev, 0, RIC2);
    ravb_write(ndev, 0, TIC);

    /* Stop PTP Clock driver */
    if (priv->chip_id == RCAR_GEN2)
        ravb_ptp_stop(ndev);

    /* Set the config mode to stop the AVB-DMAC's processes */
    if (ravb_stop_dma(ndev) < 0)
        netdev_err(ndev,
                   "device will be stopped after h/w processes are done.\n");

    /* Clear the timestamp list */
    list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
        list_del(&ts_skb->list);
        kfree(ts_skb);
    }

    /* PHY disconnect */
    if (priv->phydev) {
        phy_stop(priv->phydev);
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;
    }

    free_irq(ndev->irq, ndev);

    napi_disable(&priv->napi[RAVB_NC]);
    napi_disable(&priv->napi[RAVB_BE]);

    /* Free all the skb's in the RX queue and the DMA buffers. */
    ravb_ring_free(ndev, RAVB_BE);
    ravb_ring_free(ndev, RAVB_NC);

    return 0;
}

static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct hwtstamp_config config;

    config.flags = 0;
    config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
                                            HWTSTAMP_TX_OFF;
    if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
    else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
        config.rx_filter = HWTSTAMP_FILTER_ALL;
    else
        config.rx_filter = HWTSTAMP_FILTER_NONE;

    return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
        -EFAULT : 0;
}

/* Control hardware time stamping */
static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct hwtstamp_config config;
    u32 tstamp_rx_ctrl = RAVB_RXTSTAMP_ENABLED;
    u32 tstamp_tx_ctrl;

    if (copy_from_user(&config, req->ifr_data, sizeof(config)))
        return -EFAULT;

    /* Reserved for future extensions */
    if (config.flags)
        return -EINVAL;

    switch (config.tx_type) {
    case HWTSTAMP_TX_OFF:
        tstamp_tx_ctrl = 0;
        break;
    case HWTSTAMP_TX_ON:
        tstamp_tx_ctrl = RAVB_TXTSTAMP_ENABLED;
        break;
    default:
        return -ERANGE;
    }

    switch (config.rx_filter) {
    case HWTSTAMP_FILTER_NONE:
        tstamp_rx_ctrl = 0;
        break;
    case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
        break;
    default:
        config.rx_filter = HWTSTAMP_FILTER_ALL;
        tstamp_rx_ctrl |= RAVB_RXTSTAMP_TYPE_ALL;
    }

    priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
    priv->tstamp_rx_ctrl = tstamp_rx_ctrl;

    return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
        -EFAULT : 0;
}

/* ioctl to device function */
static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
    struct ravb_private *priv = netdev_priv(ndev);
    struct phy_device *phydev = priv->phydev;

    if (!netif_running(ndev))
        return -EINVAL;

    if (!phydev)
        return -ENODEV;

    switch (cmd) {
    case SIOCGHWTSTAMP:
        return ravb_hwtstamp_get(ndev, req);
    case SIOCSHWTSTAMP:
        return ravb_hwtstamp_set(ndev, req);
    }

    return phy_mii_ioctl(phydev, req, cmd);
}

static const struct net_device_ops ravb_netdev_ops = {
    .ndo_open               = ravb_open,
    .ndo_stop               = ravb_close,
    .ndo_start_xmit         = ravb_start_xmit,
    .ndo_select_queue       = ravb_select_queue,
    .ndo_get_stats          = ravb_get_stats,
    .ndo_set_rx_mode        = ravb_set_rx_mode,
    .ndo_tx_timeout         = ravb_tx_timeout,
    .ndo_do_ioctl           = ravb_do_ioctl,
    .ndo_validate_addr      = eth_validate_addr,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_change_mtu         = eth_change_mtu,
};

/* MDIO bus init function */
static int ravb_mdio_init(struct ravb_private *priv)
{
    struct platform_device *pdev = priv->pdev;
    struct device *dev = &pdev->dev;
    int error;

    /* Bitbang init */
    priv->mdiobb.ops = &bb_ops;

    /* MII controller setting */
    priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
    if (!priv->mii_bus)
        return -ENOMEM;

    /* Hook up MII support for ethtool */
    priv->mii_bus->name = "ravb_mii";
    priv->mii_bus->parent = dev;
    snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
             pdev->name, pdev->id);

    /* Register MDIO bus */
    error = of_mdiobus_register(priv->mii_bus, dev->of_node);
    if (error)
        goto out_free_bus;

    return 0;

out_free_bus:
    free_mdio_bitbang(priv->mii_bus);
    return error;
}

/* MDIO bus release function */
static int ravb_mdio_release(struct ravb_private *priv)
{
    /* Unregister mdio bus */
    mdiobus_unregister(priv->mii_bus);

    /* Free bitbang info */
    free_mdio_bitbang(priv->mii_bus);

    return 0;
}

static const struct of_device_id ravb_match_table[] = {
    { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
    { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
    { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 },
    { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 },
    { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 },
    { }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);

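/* Program the gPTP timer increment value (GTI) from the AVB clock rate */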
static int ravb_set_gti(struct net_device *ndev)
{
    struct device *dev = ndev->dev.parent;
    struct device_node *np = dev->of_node;
    unsigned long rate;
    struct clk *clk;
    uint64_t inc;

    clk = of_clk_get(np, 0);
    if (IS_ERR(clk)) {
        dev_err(dev, "could not get clock\n");
        return PTR_ERR(clk);
    }

    rate = clk_get_rate(clk);
    clk_put(clk);

    inc = 1000000000ULL << 20;
    do_div(inc, rate);

    if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) {
        dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n",
                inc, GTI_TIV_MIN, GTI_TIV_MAX);
        return -EINVAL;
    }

    ravb_write(ndev, inc, GTI);

    return 0;
}

static int ravb_probe(struct platform_device *pdev)
{
    struct device_node *np = pdev->dev.of_node;
    const struct of_device_id *match;
    struct ravb_private *priv;
    enum ravb_chip_id chip_id;
    struct net_device *ndev;
    int error, irq, q;
    struct resource *res;

    if (!np) {
        dev_err(&pdev->dev,
                "this driver is required to be instantiated from device tree\n");
        return -EINVAL;
    }

    /* Get base address */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "invalid resource\n");
        return -EINVAL;
    }

    ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
                              NUM_TX_QUEUE, NUM_RX_QUEUE);
    if (!ndev)
        return -ENOMEM;

    pm_runtime_enable(&pdev->dev);
    pm_runtime_get_sync(&pdev->dev);

    /* The Ether-specific entries in the device structure. */
    ndev->base_addr = res->start;

    match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev);
    chip_id = (enum ravb_chip_id)match->data;

    if (chip_id == RCAR_GEN3)
        irq = platform_get_irq_byname(pdev, "ch22");
    else
        irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        error = irq;
        goto out_release;
    }
    ndev->irq = irq;

    SET_NETDEV_DEV(ndev, &pdev->dev);

    priv = netdev_priv(ndev);
    priv->ndev = ndev;
    priv->pdev = pdev;
    priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
    priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
    priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
    priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
    priv->addr = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(priv->addr)) {
        error = PTR_ERR(priv->addr);
        goto out_release;
    }

    spin_lock_init(&priv->lock);
    INIT_WORK(&priv->work, ravb_tx_timeout_work);

    priv->phy_interface = of_get_phy_mode(np);

    priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link");
    priv->avb_link_active_low =
        of_property_read_bool(np, "renesas,ether-link-active-low");

    if (chip_id == RCAR_GEN3) {
        irq = platform_get_irq_byname(pdev, "ch24");
        if (irq < 0) {
            error = irq;
            goto out_release;
        }
        priv->emac_irq = irq;
    }

    priv->chip_id = chip_id;

    ndev->netdev_ops = &ravb_netdev_ops;
    ndev->ethtool_ops = &ravb_ethtool_ops;

    /* Set AVB config mode */
    if (chip_id == RCAR_GEN2) {
        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
                   CCC_OPC_CONFIG, CCC);
        /* Set CSEL value */
        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) |
                   CCC_CSEL_HPB, CCC);
    } else {
        ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
                   CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
    }

    /* Set GTI value */
    error = ravb_set_gti(ndev);
    if (error)
        goto out_release;

    /* Request GTI loading */
    ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);

    /* Allocate descriptor base address table */
    priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
    priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
                                        &priv->desc_bat_dma, GFP_KERNEL);
    if (!priv->desc_bat) {
        dev_err(&pdev->dev,
                "Cannot allocate desc base address table (size %d bytes)\n",
                priv->desc_bat_size);
        error = -ENOMEM;
        goto out_release;
    }
    for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
        priv->desc_bat[q].die_dt = DT_EOS;
    ravb_write(ndev, priv->desc_bat_dma, DBAT);

    /* Initialise HW timestamp list */
    INIT_LIST_HEAD(&priv->ts_skb_list);

    /* Initialise PTP Clock driver */
    if (chip_id != RCAR_GEN2)
        ravb_ptp_init(ndev, pdev);

    /* Debug message level */
    priv->msg_enable = RAVB_DEF_MSG_ENABLE;

    /* Read and set MAC address */
    ravb_read_mac_address(ndev, of_get_mac_address(np));
    if (!is_valid_ether_addr(ndev->dev_addr)) {
        dev_warn(&pdev->dev,
                 "no valid MAC address supplied, using a random one\n");
        eth_hw_addr_random(ndev);
    }

    /* MDIO bus init */
    error = ravb_mdio_init(priv);
    if (error) {
        dev_err(&pdev->dev, "failed to initialize MDIO\n");
        goto out_dma_free;
    }

    netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
    netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);

    /* Network device register */
    error = register_netdev(ndev);
    if (error)
        goto out_napi_del;

    /* Print device information */
    netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n",
                (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

    platform_set_drvdata(pdev, ndev);

    return 0;

out_napi_del:
    netif_napi_del(&priv->napi[RAVB_NC]);
    netif_napi_del(&priv->napi[RAVB_BE]);
    ravb_mdio_release(priv);
out_dma_free:
    dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
                      priv->desc_bat_dma);

    /* Stop PTP Clock driver */
    if (chip_id != RCAR_GEN2)
        ravb_ptp_stop(ndev);
out_release:
    if (ndev)
        free_netdev(ndev);

    pm_runtime_put(&pdev->dev);
    pm_runtime_disable(&pdev->dev);
    return error;
}

static int ravb_remove(struct platform_device *pdev)
{
    struct net_device *ndev = platform_get_drvdata(pdev);
    struct ravb_private *priv = netdev_priv(ndev);

    /* Stop PTP Clock driver */
    if (priv->chip_id != RCAR_GEN2)
        ravb_ptp_stop(ndev);

    dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
                      priv->desc_bat_dma);
    /* Set reset mode */
    ravb_write(ndev, CCC_OPC_RESET, CCC);
    pm_runtime_put_sync(&pdev->dev);
    unregister_netdev(ndev);
    netif_napi_del(&priv->napi[RAVB_NC]);
    netif_napi_del(&priv->napi[RAVB_BE]);
    ravb_mdio_release(priv);
    pm_runtime_disable(&pdev->dev);
    free_netdev(ndev);
    platform_set_drvdata(pdev, NULL);

    return 0;
}

#ifdef CONFIG_PM
static int ravb_runtime_nop(struct device *dev)
{
    /* Runtime PM callback shared between ->runtime_suspend()
     * and ->runtime_resume(). Simply returns success.
     *
     * This driver re-initializes all registers after
     * pm_runtime_get_sync() anyway so there is no need
     * to save and restore registers here.
     */
    return 0;
}

static const struct dev_pm_ops ravb_dev_pm_ops = {
    .runtime_suspend = ravb_runtime_nop,
    .runtime_resume = ravb_runtime_nop,
};

#define RAVB_PM_OPS (&ravb_dev_pm_ops)
#else
#define RAVB_PM_OPS NULL
#endif

static struct platform_driver ravb_driver = {
    .probe      = ravb_probe,
    .remove     = ravb_remove,
    .driver = {
        .name           = "ravb",
        .pm             = RAVB_PM_OPS,
        .of_match_table = ravb_match_table,
    },
};

module_platform_driver(ravb_driver);

MODULE_AUTHOR("Mitsuhiro Kimura, Masaru Nagai");
MODULE_DESCRIPTION("Renesas Ethernet AVB driver");
MODULE_LICENSE("GPL v2");