/*
 * Blackfin On-Chip MAC Driver
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */
11 #define DRV_VERSION "1.1"
12 #define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/irq.h>
26 #include <linux/ioport.h>
27 #include <linux/crc32.h>
28 #include <linux/device.h>
29 #include <linux/spinlock.h>
30 #include <linux/mii.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/ethtool.h>
34 #include <linux/skbuff.h>
35 #include <linux/platform_device.h>
38 #include <linux/dma-mapping.h>
40 #include <asm/div64.h>
42 #include <asm/blackfin.h>
43 #include <asm/cacheflush.h>
44 #include <asm/portmux.h>
49 MODULE_AUTHOR("Bryan Wu, Luke Yang");
50 MODULE_LICENSE("GPL");
51 MODULE_DESCRIPTION(DRV_DESC
);
52 MODULE_ALIAS("platform:bfin_mac");
54 #if defined(CONFIG_BFIN_MAC_USE_L1)
55 # define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56 # define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
58 # define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60 # define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
64 #define PKT_BUF_SZ 1580
66 #define MAX_TIMEOUT_CNT 500
68 /* pointers to maintain transmit list */
69 static struct net_dma_desc_tx
*tx_list_head
;
70 static struct net_dma_desc_tx
*tx_list_tail
;
71 static struct net_dma_desc_rx
*rx_list_head
;
72 static struct net_dma_desc_rx
*rx_list_tail
;
73 static struct net_dma_desc_rx
*current_rx_ptr
;
74 static struct net_dma_desc_tx
*current_tx_ptr
;
75 static struct net_dma_desc_tx
*tx_desc
;
76 static struct net_dma_desc_rx
*rx_desc
;
78 static void desc_list_free(void)
80 struct net_dma_desc_rx
*r
;
81 struct net_dma_desc_tx
*t
;
83 #if !defined(CONFIG_BFIN_MAC_USE_L1)
84 dma_addr_t dma_handle
= 0;
89 for (i
= 0; i
< CONFIG_BFIN_TX_DESC_NUM
; i
++) {
92 dev_kfree_skb(t
->skb
);
98 bfin_mac_free(dma_handle
, tx_desc
, CONFIG_BFIN_TX_DESC_NUM
);
103 for (i
= 0; i
< CONFIG_BFIN_RX_DESC_NUM
; i
++) {
106 dev_kfree_skb(r
->skb
);
112 bfin_mac_free(dma_handle
, rx_desc
, CONFIG_BFIN_RX_DESC_NUM
);
116 static int desc_list_init(struct net_device
*dev
)
119 struct sk_buff
*new_skb
;
120 #if !defined(CONFIG_BFIN_MAC_USE_L1)
122 * This dma_handle is useless in Blackfin dma_alloc_coherent().
123 * The real dma handler is the return value of dma_alloc_coherent().
125 dma_addr_t dma_handle
;
128 tx_desc
= bfin_mac_alloc(&dma_handle
,
129 sizeof(struct net_dma_desc_tx
),
130 CONFIG_BFIN_TX_DESC_NUM
);
134 rx_desc
= bfin_mac_alloc(&dma_handle
,
135 sizeof(struct net_dma_desc_rx
),
136 CONFIG_BFIN_RX_DESC_NUM
);
141 tx_list_head
= tx_list_tail
= tx_desc
;
143 for (i
= 0; i
< CONFIG_BFIN_TX_DESC_NUM
; i
++) {
144 struct net_dma_desc_tx
*t
= tx_desc
+ i
;
145 struct dma_descriptor
*a
= &(t
->desc_a
);
146 struct dma_descriptor
*b
= &(t
->desc_b
);
150 * read from memory WNR = 0
151 * wordsize is 32 bits
152 * 6 half words is desc size
155 a
->config
= WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
156 a
->start_addr
= (unsigned long)t
->packet
;
158 a
->next_dma_desc
= b
;
162 * write to memory WNR = 1
163 * wordsize is 32 bits
165 * 6 half words is desc size
168 b
->config
= DMAEN
| WNR
| WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
169 b
->start_addr
= (unsigned long)(&(t
->status
));
173 tx_list_tail
->desc_b
.next_dma_desc
= a
;
174 tx_list_tail
->next
= t
;
177 tx_list_tail
->next
= tx_list_head
; /* tx_list is a circle */
178 tx_list_tail
->desc_b
.next_dma_desc
= &(tx_list_head
->desc_a
);
179 current_tx_ptr
= tx_list_head
;
182 rx_list_head
= rx_list_tail
= rx_desc
;
184 for (i
= 0; i
< CONFIG_BFIN_RX_DESC_NUM
; i
++) {
185 struct net_dma_desc_rx
*r
= rx_desc
+ i
;
186 struct dma_descriptor
*a
= &(r
->desc_a
);
187 struct dma_descriptor
*b
= &(r
->desc_b
);
189 /* allocate a new skb for next time receive */
190 new_skb
= netdev_alloc_skb(dev
, PKT_BUF_SZ
+ NET_IP_ALIGN
);
192 pr_notice("init: low on mem - packet dropped\n");
195 skb_reserve(new_skb
, NET_IP_ALIGN
);
196 /* Invidate the data cache of skb->data range when it is write back
197 * cache. It will prevent overwritting the new data from DMA
199 blackfin_dcache_invalidate_range((unsigned long)new_skb
->head
,
200 (unsigned long)new_skb
->end
);
205 * write to memory WNR = 1
206 * wordsize is 32 bits
208 * 6 half words is desc size
211 a
->config
= DMAEN
| WNR
| WDSIZE_32
| NDSIZE_6
| DMAFLOW_LARGE
;
212 /* since RXDWA is enabled */
213 a
->start_addr
= (unsigned long)new_skb
->data
- 2;
215 a
->next_dma_desc
= b
;
219 * write to memory WNR = 1
220 * wordsize is 32 bits
222 * 6 half words is desc size
225 b
->config
= DMAEN
| WNR
| WDSIZE_32
| DI_EN
|
226 NDSIZE_6
| DMAFLOW_LARGE
;
227 b
->start_addr
= (unsigned long)(&(r
->status
));
230 rx_list_tail
->desc_b
.next_dma_desc
= a
;
231 rx_list_tail
->next
= r
;
234 rx_list_tail
->next
= rx_list_head
; /* rx_list is a circle */
235 rx_list_tail
->desc_b
.next_dma_desc
= &(rx_list_head
->desc_a
);
236 current_rx_ptr
= rx_list_head
;
242 pr_err("kmalloc failed\n");
247 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
252 /* Wait until the previous MDC/MDIO transaction has completed */
253 static int bfin_mdio_poll(void)
255 int timeout_cnt
= MAX_TIMEOUT_CNT
;
257 /* poll the STABUSY bit */
258 while ((bfin_read_EMAC_STAADD()) & STABUSY
) {
260 if (timeout_cnt
-- < 0) {
261 pr_err("wait MDC/MDIO transaction to complete timeout\n");
269 /* Read an off-chip register in a PHY through the MDC/MDIO port */
270 static int bfin_mdiobus_read(struct mii_bus
*bus
, int phy_addr
, int regnum
)
274 ret
= bfin_mdio_poll();
279 bfin_write_EMAC_STAADD(SET_PHYAD((u16
) phy_addr
) |
280 SET_REGAD((u16
) regnum
) |
283 ret
= bfin_mdio_poll();
287 return (int) bfin_read_EMAC_STADAT();
290 /* Write an off-chip register in a PHY through the MDC/MDIO port */
291 static int bfin_mdiobus_write(struct mii_bus
*bus
, int phy_addr
, int regnum
,
296 ret
= bfin_mdio_poll();
300 bfin_write_EMAC_STADAT((u32
) value
);
303 bfin_write_EMAC_STAADD(SET_PHYAD((u16
) phy_addr
) |
304 SET_REGAD((u16
) regnum
) |
308 return bfin_mdio_poll();
311 static int bfin_mdiobus_reset(struct mii_bus
*bus
)
316 static void bfin_mac_adjust_link(struct net_device
*dev
)
318 struct bfin_mac_local
*lp
= netdev_priv(dev
);
319 struct phy_device
*phydev
= lp
->phydev
;
323 spin_lock_irqsave(&lp
->lock
, flags
);
325 /* Now we make sure that we can be in full duplex mode.
326 * If not, we operate in half-duplex mode. */
327 if (phydev
->duplex
!= lp
->old_duplex
) {
328 u32 opmode
= bfin_read_EMAC_OPMODE();
336 bfin_write_EMAC_OPMODE(opmode
);
337 lp
->old_duplex
= phydev
->duplex
;
340 if (phydev
->speed
!= lp
->old_speed
) {
341 if (phydev
->interface
== PHY_INTERFACE_MODE_RMII
) {
342 u32 opmode
= bfin_read_EMAC_OPMODE();
343 switch (phydev
->speed
) {
352 "Ack! Speed (%d) is not 10/100!\n",
356 bfin_write_EMAC_OPMODE(opmode
);
360 lp
->old_speed
= phydev
->speed
;
367 } else if (lp
->old_link
) {
375 u32 opmode
= bfin_read_EMAC_OPMODE();
376 phy_print_status(phydev
);
377 pr_debug("EMAC_OPMODE = 0x%08x\n", opmode
);
380 spin_unlock_irqrestore(&lp
->lock
, flags
);
384 #define MDC_CLK 2500000
386 static int mii_probe(struct net_device
*dev
, int phy_mode
)
388 struct bfin_mac_local
*lp
= netdev_priv(dev
);
389 struct phy_device
*phydev
= NULL
;
390 unsigned short sysctl
;
394 /* Enable PHY output early */
395 if (!(bfin_read_VR_CTL() & CLKBUFOE
))
396 bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE
);
399 mdc_div
= ((sclk
/ MDC_CLK
) / 2) - 1;
401 sysctl
= bfin_read_EMAC_SYSCTL();
402 sysctl
= (sysctl
& ~MDCDIV
) | SET_MDCDIV(mdc_div
);
403 bfin_write_EMAC_SYSCTL(sysctl
);
405 /* search for connected PHY device */
406 for (i
= 0; i
< PHY_MAX_ADDR
; ++i
) {
407 struct phy_device
*const tmp_phydev
= lp
->mii_bus
->phy_map
[i
];
410 continue; /* no PHY here... */
413 break; /* found it */
416 /* now we are supposed to have a proper phydev, to attach to... */
418 netdev_err(dev
, "no phy device found\n");
422 if (phy_mode
!= PHY_INTERFACE_MODE_RMII
&&
423 phy_mode
!= PHY_INTERFACE_MODE_MII
) {
424 netdev_err(dev
, "invalid phy interface mode\n");
428 phydev
= phy_connect(dev
, dev_name(&phydev
->dev
),
429 &bfin_mac_adjust_link
, phy_mode
);
431 if (IS_ERR(phydev
)) {
432 netdev_err(dev
, "could not attach PHY\n");
433 return PTR_ERR(phydev
);
436 /* mask with MAC supported features */
437 phydev
->supported
&= (SUPPORTED_10baseT_Half
438 | SUPPORTED_10baseT_Full
439 | SUPPORTED_100baseT_Half
440 | SUPPORTED_100baseT_Full
442 | SUPPORTED_Pause
| SUPPORTED_Asym_Pause
446 phydev
->advertising
= phydev
->supported
;
453 pr_info("attached PHY driver [%s] "
454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
455 phydev
->drv
->name
, dev_name(&phydev
->dev
), phydev
->irq
,
456 MDC_CLK
, mdc_div
, sclk
/1000000);
466 * interrupt routine for magic packet wakeup
468 static irqreturn_t
bfin_mac_wake_interrupt(int irq
, void *dev_id
)
474 bfin_mac_ethtool_getsettings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
476 struct bfin_mac_local
*lp
= netdev_priv(dev
);
479 return phy_ethtool_gset(lp
->phydev
, cmd
);
485 bfin_mac_ethtool_setsettings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
487 struct bfin_mac_local
*lp
= netdev_priv(dev
);
489 if (!capable(CAP_NET_ADMIN
))
493 return phy_ethtool_sset(lp
->phydev
, cmd
);
498 static void bfin_mac_ethtool_getdrvinfo(struct net_device
*dev
,
499 struct ethtool_drvinfo
*info
)
501 strlcpy(info
->driver
, KBUILD_MODNAME
, sizeof(info
->driver
));
502 strlcpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
503 strlcpy(info
->fw_version
, "N/A", sizeof(info
->fw_version
));
504 strlcpy(info
->bus_info
, dev_name(&dev
->dev
), sizeof(info
->bus_info
));
507 static void bfin_mac_ethtool_getwol(struct net_device
*dev
,
508 struct ethtool_wolinfo
*wolinfo
)
510 struct bfin_mac_local
*lp
= netdev_priv(dev
);
512 wolinfo
->supported
= WAKE_MAGIC
;
513 wolinfo
->wolopts
= lp
->wol
;
516 static int bfin_mac_ethtool_setwol(struct net_device
*dev
,
517 struct ethtool_wolinfo
*wolinfo
)
519 struct bfin_mac_local
*lp
= netdev_priv(dev
);
522 if (wolinfo
->wolopts
& (WAKE_MAGICSECURE
|
529 lp
->wol
= wolinfo
->wolopts
;
531 if (lp
->wol
&& !lp
->irq_wake_requested
) {
532 /* register wake irq handler */
533 rc
= request_irq(IRQ_MAC_WAKEDET
, bfin_mac_wake_interrupt
,
534 IRQF_DISABLED
, "EMAC_WAKE", dev
);
537 lp
->irq_wake_requested
= true;
540 if (!lp
->wol
&& lp
->irq_wake_requested
) {
541 free_irq(IRQ_MAC_WAKEDET
, dev
);
542 lp
->irq_wake_requested
= false;
545 /* Make sure the PHY driver doesn't suspend */
546 device_init_wakeup(&dev
->dev
, lp
->wol
);
551 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
552 static int bfin_mac_ethtool_get_ts_info(struct net_device
*dev
,
553 struct ethtool_ts_info
*info
)
555 struct bfin_mac_local
*lp
= netdev_priv(dev
);
557 info
->so_timestamping
=
558 SOF_TIMESTAMPING_TX_HARDWARE
|
559 SOF_TIMESTAMPING_RX_HARDWARE
|
560 SOF_TIMESTAMPING_RAW_HARDWARE
;
561 info
->phc_index
= lp
->phc_index
;
563 (1 << HWTSTAMP_TX_OFF
) |
564 (1 << HWTSTAMP_TX_ON
);
566 (1 << HWTSTAMP_FILTER_NONE
) |
567 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT
) |
568 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
569 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
574 static const struct ethtool_ops bfin_mac_ethtool_ops
= {
575 .get_settings
= bfin_mac_ethtool_getsettings
,
576 .set_settings
= bfin_mac_ethtool_setsettings
,
577 .get_link
= ethtool_op_get_link
,
578 .get_drvinfo
= bfin_mac_ethtool_getdrvinfo
,
579 .get_wol
= bfin_mac_ethtool_getwol
,
580 .set_wol
= bfin_mac_ethtool_setwol
,
581 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
582 .get_ts_info
= bfin_mac_ethtool_get_ts_info
,
586 /**************************************************************************/
587 static void setup_system_regs(struct net_device
*dev
)
589 struct bfin_mac_local
*lp
= netdev_priv(dev
);
591 unsigned short sysctl
;
594 * Odd word alignment for Receive Frame DMA word
595 * Configure checksum support and rcve frame word alignment
597 sysctl
= bfin_read_EMAC_SYSCTL();
599 * check if interrupt is requested for any PHY,
600 * enable PHY interrupt only if needed
602 for (i
= 0; i
< PHY_MAX_ADDR
; ++i
)
603 if (lp
->mii_bus
->irq
[i
] != PHY_POLL
)
605 if (i
< PHY_MAX_ADDR
)
608 #if defined(BFIN_MAC_CSUM_OFFLOAD)
613 bfin_write_EMAC_SYSCTL(sysctl
);
615 bfin_write_EMAC_MMC_CTL(RSTC
| CROLL
);
617 /* Set vlan regs to let 1522 bytes long packets pass through */
618 bfin_write_EMAC_VLAN1(lp
->vlan1_mask
);
619 bfin_write_EMAC_VLAN2(lp
->vlan2_mask
);
621 /* Initialize the TX DMA channel registers */
622 bfin_write_DMA2_X_COUNT(0);
623 bfin_write_DMA2_X_MODIFY(4);
624 bfin_write_DMA2_Y_COUNT(0);
625 bfin_write_DMA2_Y_MODIFY(0);
627 /* Initialize the RX DMA channel registers */
628 bfin_write_DMA1_X_COUNT(0);
629 bfin_write_DMA1_X_MODIFY(4);
630 bfin_write_DMA1_Y_COUNT(0);
631 bfin_write_DMA1_Y_MODIFY(0);
634 static void setup_mac_addr(u8
*mac_addr
)
636 u32 addr_low
= le32_to_cpu(*(__le32
*) & mac_addr
[0]);
637 u16 addr_hi
= le16_to_cpu(*(__le16
*) & mac_addr
[4]);
639 /* this depends on a little-endian machine */
640 bfin_write_EMAC_ADDRLO(addr_low
);
641 bfin_write_EMAC_ADDRHI(addr_hi
);
644 static int bfin_mac_set_mac_address(struct net_device
*dev
, void *p
)
646 struct sockaddr
*addr
= p
;
647 if (netif_running(dev
))
649 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
650 setup_mac_addr(dev
->dev_addr
);
654 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
655 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
657 static u32
bfin_select_phc_clock(u32 input_clk
, unsigned int *shift_result
)
659 u32 ipn
= 1000000000UL / input_clk
;
661 unsigned int shift
= 0;
667 *shift_result
= shift
;
668 return 1000000000UL / ppn
;
671 static int bfin_mac_hwtstamp_ioctl(struct net_device
*netdev
,
672 struct ifreq
*ifr
, int cmd
)
674 struct hwtstamp_config config
;
675 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
677 u32 ptpfv1
, ptpfv2
, ptpfv3
, ptpfoff
;
679 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
682 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
683 __func__
, config
.flags
, config
.tx_type
, config
.rx_filter
);
685 /* reserved for future extensions */
689 if ((config
.tx_type
!= HWTSTAMP_TX_OFF
) &&
690 (config
.tx_type
!= HWTSTAMP_TX_ON
))
693 ptpctl
= bfin_read_EMAC_PTP_CTL();
695 switch (config
.rx_filter
) {
696 case HWTSTAMP_FILTER_NONE
:
698 * Dont allow any timestamping
701 bfin_write_EMAC_PTP_FV3(ptpfv3
);
703 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
704 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
705 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
707 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
708 * to enable all the field matches.
711 bfin_write_EMAC_PTP_CTL(ptpctl
);
713 * Keep the default values of the EMAC_PTP_FOFF register.
715 ptpfoff
= 0x4A24170C;
716 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
718 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
722 bfin_write_EMAC_PTP_FV1(ptpfv1
);
724 bfin_write_EMAC_PTP_FV2(ptpfv2
);
726 * The default value (0xFFFC) allows the timestamping of both
727 * received Sync messages and Delay_Req messages.
730 bfin_write_EMAC_PTP_FV3(ptpfv3
);
732 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V1_L4_EVENT
;
734 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
735 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
736 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
737 /* Clear all five comparison mask bits (bits[12:8]) in the
738 * EMAC_PTP_CTL register to enable all the field matches.
741 bfin_write_EMAC_PTP_CTL(ptpctl
);
743 * Keep the default values of the EMAC_PTP_FOFF register, except set
744 * the PTPCOF field to 0x2A.
746 ptpfoff
= 0x2A24170C;
747 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
749 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
753 bfin_write_EMAC_PTP_FV1(ptpfv1
);
755 bfin_write_EMAC_PTP_FV2(ptpfv2
);
757 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
758 * the value to 0xFFF0.
761 bfin_write_EMAC_PTP_FV3(ptpfv3
);
763 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L4_EVENT
;
765 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
766 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
767 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
769 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
770 * EFTM and PTPCM field comparison.
773 bfin_write_EMAC_PTP_CTL(ptpctl
);
775 * Keep the default values of all the fields of the EMAC_PTP_FOFF
776 * register, except set the PTPCOF field to 0x0E.
778 ptpfoff
= 0x0E24170C;
779 bfin_write_EMAC_PTP_FOFF(ptpfoff
);
781 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
782 * corresponds to PTP messages on the MAC layer.
785 bfin_write_EMAC_PTP_FV1(ptpfv1
);
787 bfin_write_EMAC_PTP_FV2(ptpfv2
);
789 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
790 * messages, set the value to 0xFFF0.
793 bfin_write_EMAC_PTP_FV3(ptpfv3
);
795 config
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_L2_EVENT
;
801 if (config
.tx_type
== HWTSTAMP_TX_OFF
&&
802 bfin_mac_hwtstamp_is_none(config
.rx_filter
)) {
804 bfin_write_EMAC_PTP_CTL(ptpctl
);
809 bfin_write_EMAC_PTP_CTL(ptpctl
);
812 * clear any existing timestamp
814 bfin_read_EMAC_PTP_RXSNAPLO();
815 bfin_read_EMAC_PTP_RXSNAPHI();
817 bfin_read_EMAC_PTP_TXSNAPLO();
818 bfin_read_EMAC_PTP_TXSNAPHI();
823 lp
->stamp_cfg
= config
;
824 return copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)) ?
828 static void bfin_tx_hwtstamp(struct net_device
*netdev
, struct sk_buff
*skb
)
830 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
832 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) {
833 int timeout_cnt
= MAX_TIMEOUT_CNT
;
835 /* When doing time stamping, keep the connection to the socket
838 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
841 * The timestamping is done at the EMAC module's MII/RMII interface
842 * when the module sees the Start of Frame of an event message packet. This
843 * interface is the closest possible place to the physical Ethernet transmission
844 * medium, providing the best timing accuracy.
846 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL
)) && (--timeout_cnt
))
848 if (timeout_cnt
== 0)
849 netdev_err(netdev
, "timestamp the TX packet failed\n");
851 struct skb_shared_hwtstamps shhwtstamps
;
855 regval
= bfin_read_EMAC_PTP_TXSNAPLO();
856 regval
|= (u64
)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
857 memset(&shhwtstamps
, 0, sizeof(shhwtstamps
));
858 ns
= regval
<< lp
->shift
;
859 shhwtstamps
.hwtstamp
= ns_to_ktime(ns
);
860 skb_tstamp_tx(skb
, &shhwtstamps
);
865 static void bfin_rx_hwtstamp(struct net_device
*netdev
, struct sk_buff
*skb
)
867 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
870 struct skb_shared_hwtstamps
*shhwtstamps
;
872 if (bfin_mac_hwtstamp_is_none(lp
->stamp_cfg
.rx_filter
))
875 valid
= bfin_read_EMAC_PTP_ISTAT() & RXEL
;
879 shhwtstamps
= skb_hwtstamps(skb
);
881 regval
= bfin_read_EMAC_PTP_RXSNAPLO();
882 regval
|= (u64
)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
883 ns
= regval
<< lp
->shift
;
884 memset(shhwtstamps
, 0, sizeof(*shhwtstamps
));
885 shhwtstamps
->hwtstamp
= ns_to_ktime(ns
);
888 static void bfin_mac_hwtstamp_init(struct net_device
*netdev
)
890 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
892 u32 input_clk
, phc_clk
;
894 /* Initialize hardware timer */
895 input_clk
= get_sclk();
896 phc_clk
= bfin_select_phc_clock(input_clk
, &lp
->shift
);
897 addend
= phc_clk
* (1ULL << 32);
898 do_div(addend
, input_clk
);
899 bfin_write_EMAC_PTP_ADDEND((u32
)addend
);
902 ppb
= 1000000000ULL * input_clk
;
903 do_div(ppb
, phc_clk
);
904 lp
->max_ppb
= ppb
- 1000000000ULL - 1ULL;
906 /* Initialize hwstamp config */
907 lp
->stamp_cfg
.rx_filter
= HWTSTAMP_FILTER_NONE
;
908 lp
->stamp_cfg
.tx_type
= HWTSTAMP_TX_OFF
;
911 static u64
bfin_ptp_time_read(struct bfin_mac_local
*lp
)
916 lo
= bfin_read_EMAC_PTP_TIMELO();
917 hi
= bfin_read_EMAC_PTP_TIMEHI();
919 ns
= ((u64
) hi
) << 32;
926 static void bfin_ptp_time_write(struct bfin_mac_local
*lp
, u64 ns
)
932 lo
= ns
& 0xffffffff;
934 bfin_write_EMAC_PTP_TIMELO(lo
);
935 bfin_write_EMAC_PTP_TIMEHI(hi
);
938 /* PTP Hardware Clock operations */
940 static int bfin_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
945 struct bfin_mac_local
*lp
=
946 container_of(ptp
, struct bfin_mac_local
, caps
);
955 diff
= div_u64(adj
, 1000000000ULL);
957 addend
= neg_adj
? addend
- diff
: addend
+ diff
;
959 bfin_write_EMAC_PTP_ADDEND(addend
);
964 static int bfin_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
968 struct bfin_mac_local
*lp
=
969 container_of(ptp
, struct bfin_mac_local
, caps
);
971 spin_lock_irqsave(&lp
->phc_lock
, flags
);
973 now
= bfin_ptp_time_read(lp
);
975 bfin_ptp_time_write(lp
, now
);
977 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
982 static int bfin_ptp_gettime(struct ptp_clock_info
*ptp
, struct timespec
*ts
)
987 struct bfin_mac_local
*lp
=
988 container_of(ptp
, struct bfin_mac_local
, caps
);
990 spin_lock_irqsave(&lp
->phc_lock
, flags
);
992 ns
= bfin_ptp_time_read(lp
);
994 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
996 ts
->tv_sec
= div_u64_rem(ns
, 1000000000, &remainder
);
997 ts
->tv_nsec
= remainder
;
1001 static int bfin_ptp_settime(struct ptp_clock_info
*ptp
,
1002 const struct timespec
*ts
)
1005 unsigned long flags
;
1006 struct bfin_mac_local
*lp
=
1007 container_of(ptp
, struct bfin_mac_local
, caps
);
1009 ns
= ts
->tv_sec
* 1000000000ULL;
1012 spin_lock_irqsave(&lp
->phc_lock
, flags
);
1014 bfin_ptp_time_write(lp
, ns
);
1016 spin_unlock_irqrestore(&lp
->phc_lock
, flags
);
1021 static int bfin_ptp_enable(struct ptp_clock_info
*ptp
,
1022 struct ptp_clock_request
*rq
, int on
)
1027 static struct ptp_clock_info bfin_ptp_caps
= {
1028 .owner
= THIS_MODULE
,
1029 .name
= "BF518 clock",
1035 .adjfreq
= bfin_ptp_adjfreq
,
1036 .adjtime
= bfin_ptp_adjtime
,
1037 .gettime
= bfin_ptp_gettime
,
1038 .settime
= bfin_ptp_settime
,
1039 .enable
= bfin_ptp_enable
,
1042 static int bfin_phc_init(struct net_device
*netdev
, struct device
*dev
)
1044 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
1046 lp
->caps
= bfin_ptp_caps
;
1047 lp
->caps
.max_adj
= lp
->max_ppb
;
1048 lp
->clock
= ptp_clock_register(&lp
->caps
, dev
);
1049 if (IS_ERR(lp
->clock
))
1050 return PTR_ERR(lp
->clock
);
1052 lp
->phc_index
= ptp_clock_index(lp
->clock
);
1053 spin_lock_init(&lp
->phc_lock
);
1058 static void bfin_phc_release(struct bfin_mac_local
*lp
)
1060 ptp_clock_unregister(lp
->clock
);
1064 # define bfin_mac_hwtstamp_is_none(cfg) 0
1065 # define bfin_mac_hwtstamp_init(dev)
1066 # define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
1067 # define bfin_rx_hwtstamp(dev, skb)
1068 # define bfin_tx_hwtstamp(dev, skb)
1069 # define bfin_phc_init(netdev, dev) 0
1070 # define bfin_phc_release(lp)
1073 static inline void _tx_reclaim_skb(void)
1076 tx_list_head
->desc_a
.config
&= ~DMAEN
;
1077 tx_list_head
->status
.status_word
= 0;
1078 if (tx_list_head
->skb
) {
1079 dev_kfree_skb(tx_list_head
->skb
);
1080 tx_list_head
->skb
= NULL
;
1082 tx_list_head
= tx_list_head
->next
;
1084 } while (tx_list_head
->status
.status_word
!= 0);
1087 static void tx_reclaim_skb(struct bfin_mac_local
*lp
)
1089 int timeout_cnt
= MAX_TIMEOUT_CNT
;
1091 if (tx_list_head
->status
.status_word
!= 0)
1094 if (current_tx_ptr
->next
== tx_list_head
) {
1095 while (tx_list_head
->status
.status_word
== 0) {
1096 /* slow down polling to avoid too many queue stop. */
1098 /* reclaim skb if DMA is not running. */
1099 if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN
))
1101 if (timeout_cnt
-- < 0)
1105 if (timeout_cnt
>= 0)
1108 netif_stop_queue(lp
->ndev
);
1111 if (current_tx_ptr
->next
!= tx_list_head
&&
1112 netif_queue_stopped(lp
->ndev
))
1113 netif_wake_queue(lp
->ndev
);
1115 if (tx_list_head
!= current_tx_ptr
) {
1116 /* shorten the timer interval if tx queue is stopped */
1117 if (netif_queue_stopped(lp
->ndev
))
1118 lp
->tx_reclaim_timer
.expires
=
1119 jiffies
+ (TX_RECLAIM_JIFFIES
>> 4);
1121 lp
->tx_reclaim_timer
.expires
=
1122 jiffies
+ TX_RECLAIM_JIFFIES
;
1124 mod_timer(&lp
->tx_reclaim_timer
,
1125 lp
->tx_reclaim_timer
.expires
);
/* Timer callback for lp->tx_reclaim_timer: the timer data is the
 * bfin_mac_local pointer cast to unsigned long (pre-timer_setup API),
 * so cast it back and run the TX descriptor reclaim pass.
 */
static void tx_reclaim_skb_timeout(unsigned long lp)
{
	tx_reclaim_skb((struct bfin_mac_local *)lp);
}
1136 static int bfin_mac_hard_start_xmit(struct sk_buff
*skb
,
1137 struct net_device
*dev
)
1139 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1141 u32 data_align
= (unsigned long)(skb
->data
) & 0x3;
1143 current_tx_ptr
->skb
= skb
;
1145 if (data_align
== 0x2) {
1146 /* move skb->data to current_tx_ptr payload */
1147 data
= (u16
*)(skb
->data
) - 1;
1148 *data
= (u16
)(skb
->len
);
1150 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
1151 * a DMA_Length_Word field associated with the packet. The lower 12 bits
1152 * of this field are the length of the packet payload in bytes and the higher
1153 * 4 bits are the timestamping enable field.
1155 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)
1158 current_tx_ptr
->desc_a
.start_addr
= (u32
)data
;
1159 /* this is important! */
1160 blackfin_dcache_flush_range((u32
)data
,
1161 (u32
)((u8
*)data
+ skb
->len
+ 4));
1163 *((u16
*)(current_tx_ptr
->packet
)) = (u16
)(skb
->len
);
1164 /* enable timestamping for the sent packet */
1165 if (skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)
1166 *((u16
*)(current_tx_ptr
->packet
)) |= 0x1000;
1167 memcpy((u8
*)(current_tx_ptr
->packet
+ 2), skb
->data
,
1169 current_tx_ptr
->desc_a
.start_addr
=
1170 (u32
)current_tx_ptr
->packet
;
1171 blackfin_dcache_flush_range(
1172 (u32
)current_tx_ptr
->packet
,
1173 (u32
)(current_tx_ptr
->packet
+ skb
->len
+ 2));
1176 /* make sure the internal data buffers in the core are drained
1177 * so that the DMA descriptors are completely written when the
1178 * DMA engine goes to fetch them below
1182 /* always clear status buffer before start tx dma */
1183 current_tx_ptr
->status
.status_word
= 0;
1185 /* enable this packet's dma */
1186 current_tx_ptr
->desc_a
.config
|= DMAEN
;
1188 /* tx dma is running, just return */
1189 if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN
)
1192 /* tx dma is not running */
1193 bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr
->desc_a
));
1194 /* dma enabled, read from memory, size is 6 */
1195 bfin_write_DMA2_CONFIG(current_tx_ptr
->desc_a
.config
);
1196 /* Turn on the EMAC tx */
1197 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE
);
1200 bfin_tx_hwtstamp(dev
, skb
);
1202 current_tx_ptr
= current_tx_ptr
->next
;
1203 dev
->stats
.tx_packets
++;
1204 dev
->stats
.tx_bytes
+= (skb
->len
);
1208 return NETDEV_TX_OK
;
1211 #define IP_HEADER_OFF 0
1212 #define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1213 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1215 static void bfin_mac_rx(struct net_device
*dev
)
1217 struct sk_buff
*skb
, *new_skb
;
1219 struct bfin_mac_local
*lp __maybe_unused
= netdev_priv(dev
);
1220 #if defined(BFIN_MAC_CSUM_OFFLOAD)
1222 unsigned char fcs
[ETH_FCS_LEN
+ 1];
1225 /* check if frame status word reports an error condition
1226 * we which case we simply drop the packet
1228 if (current_rx_ptr
->status
.status_word
& RX_ERROR_MASK
) {
1229 netdev_notice(dev
, "rx: receive error - packet dropped\n");
1230 dev
->stats
.rx_dropped
++;
1234 /* allocate a new skb for next time receive */
1235 skb
= current_rx_ptr
->skb
;
1237 new_skb
= netdev_alloc_skb(dev
, PKT_BUF_SZ
+ NET_IP_ALIGN
);
1239 netdev_notice(dev
, "rx: low on mem - packet dropped\n");
1240 dev
->stats
.rx_dropped
++;
1243 /* reserve 2 bytes for RXDWA padding */
1244 skb_reserve(new_skb
, NET_IP_ALIGN
);
1245 /* Invidate the data cache of skb->data range when it is write back
1246 * cache. It will prevent overwritting the new data from DMA
1248 blackfin_dcache_invalidate_range((unsigned long)new_skb
->head
,
1249 (unsigned long)new_skb
->end
);
1251 current_rx_ptr
->skb
= new_skb
;
1252 current_rx_ptr
->desc_a
.start_addr
= (unsigned long)new_skb
->data
- 2;
1254 len
= (unsigned short)((current_rx_ptr
->status
.status_word
) & RX_FRLEN
);
1255 /* Deduce Ethernet FCS length from Ethernet payload length */
1259 skb
->protocol
= eth_type_trans(skb
, dev
);
1261 bfin_rx_hwtstamp(dev
, skb
);
1263 #if defined(BFIN_MAC_CSUM_OFFLOAD)
1264 /* Checksum offloading only works for IPv4 packets with the standard IP header
1265 * length of 20 bytes, because the blackfin MAC checksum calculation is
1266 * based on that assumption. We must NOT use the calculated checksum if our
1267 * IP version or header break that assumption.
1269 if (skb
->data
[IP_HEADER_OFF
] == 0x45) {
1270 skb
->csum
= current_rx_ptr
->status
.ip_payload_csum
;
1272 * Deduce Ethernet FCS from hardware generated IP payload checksum.
1273 * IP checksum is based on 16-bit one's complement algorithm.
1274 * To deduce a value from checksum is equal to add its inversion.
1275 * If the IP payload len is odd, the inversed FCS should also
1276 * begin from odd address and leave first byte zero.
1280 for (i
= 0; i
< ETH_FCS_LEN
; i
++)
1281 fcs
[i
+ 1] = ~skb
->data
[skb
->len
+ i
];
1282 skb
->csum
= csum_partial(fcs
, ETH_FCS_LEN
+ 1, skb
->csum
);
1284 for (i
= 0; i
< ETH_FCS_LEN
; i
++)
1285 fcs
[i
] = ~skb
->data
[skb
->len
+ i
];
1286 skb
->csum
= csum_partial(fcs
, ETH_FCS_LEN
, skb
->csum
);
1288 skb
->ip_summed
= CHECKSUM_COMPLETE
;
1293 dev
->stats
.rx_packets
++;
1294 dev
->stats
.rx_bytes
+= len
;
1296 current_rx_ptr
->status
.status_word
= 0x00000000;
1297 current_rx_ptr
= current_rx_ptr
->next
;
1300 /* interrupt routine to handle rx and error signal */
1301 static irqreturn_t
bfin_mac_interrupt(int irq
, void *dev_id
)
1303 struct net_device
*dev
= dev_id
;
1307 if (current_rx_ptr
->status
.status_word
== 0) {
1308 /* no more new packet received */
1310 if (current_rx_ptr
->next
->status
.status_word
!= 0) {
1311 current_rx_ptr
= current_rx_ptr
->next
;
1315 bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
1316 DMA_DONE
| DMA_ERR
);
1323 goto get_one_packet
;
1326 #ifdef CONFIG_NET_POLL_CONTROLLER
1327 static void bfin_mac_poll(struct net_device
*dev
)
1329 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1331 disable_irq(IRQ_MAC_RX
);
1332 bfin_mac_interrupt(IRQ_MAC_RX
, dev
);
1334 enable_irq(IRQ_MAC_RX
);
1336 #endif /* CONFIG_NET_POLL_CONTROLLER */
1338 static void bfin_mac_disable(void)
1340 unsigned int opmode
;
1342 opmode
= bfin_read_EMAC_OPMODE();
1345 /* Turn off the EMAC */
1346 bfin_write_EMAC_OPMODE(opmode
);
1350 * Enable Interrupts, Receive, and Transmit
1352 static int bfin_mac_enable(struct phy_device
*phydev
)
1357 pr_debug("%s\n", __func__
);
1360 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head
->desc_a
));
1361 bfin_write_DMA1_CONFIG(rx_list_head
->desc_a
.config
);
1364 ret
= bfin_mdio_poll();
1368 /* We enable only RX here */
1369 /* ASTP : Enable Automatic Pad Stripping
1370 PR : Promiscuous Mode for test
1371 PSF : Receive frames with total length less than 64 bytes.
1372 FDMODE : Full Duplex Mode
1373 LB : Internal Loopback for test
1374 RE : Receiver Enable */
1375 opmode
= bfin_read_EMAC_OPMODE();
1376 if (opmode
& FDMODE
)
1379 opmode
|= DRO
| DC
| PSF
;
1382 if (phydev
->interface
== PHY_INTERFACE_MODE_RMII
) {
1383 opmode
|= RMII
; /* For Now only 100MBit are supported */
1384 #if defined(CONFIG_BF537) || defined(CONFIG_BF536)
1385 if (__SILICON_REVISION__
< 3) {
1387 * This isn't publicly documented (fun times!), but in
1388 * silicon <=0.2, the RX and TX pins are clocked together.
1389 * So in order to recv, we must enable the transmit side
1390 * as well. This will cause a spurious TX interrupt too,
1391 * but we can easily consume that.
1398 /* Turn on the EMAC rx */
1399 bfin_write_EMAC_OPMODE(opmode
);
1404 /* Our watchdog timed out. Called by the networking layer */
1405 static void bfin_mac_timeout(struct net_device
*dev
)
1407 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1409 pr_debug("%s: %s\n", dev
->name
, __func__
);
1413 del_timer(&lp
->tx_reclaim_timer
);
1415 /* reset tx queue and free skb */
1416 while (tx_list_head
!= current_tx_ptr
) {
1417 tx_list_head
->desc_a
.config
&= ~DMAEN
;
1418 tx_list_head
->status
.status_word
= 0;
1419 if (tx_list_head
->skb
) {
1420 dev_kfree_skb(tx_list_head
->skb
);
1421 tx_list_head
->skb
= NULL
;
1423 tx_list_head
= tx_list_head
->next
;
1426 if (netif_queue_stopped(lp
->ndev
))
1427 netif_wake_queue(lp
->ndev
);
1429 bfin_mac_enable(lp
->phydev
);
1431 /* We can accept TX packets again */
1432 dev
->trans_start
= jiffies
; /* prevent tx timeout */
1433 netif_wake_queue(dev
);
1436 static void bfin_mac_multicast_hash(struct net_device
*dev
)
1438 u32 emac_hashhi
, emac_hashlo
;
1439 struct netdev_hw_addr
*ha
;
1442 emac_hashhi
= emac_hashlo
= 0;
1444 netdev_for_each_mc_addr(ha
, dev
) {
1445 crc
= ether_crc(ETH_ALEN
, ha
->addr
);
1449 emac_hashhi
|= 1 << (crc
& 0x1f);
1451 emac_hashlo
|= 1 << (crc
& 0x1f);
1454 bfin_write_EMAC_HASHHI(emac_hashhi
);
1455 bfin_write_EMAC_HASHLO(emac_hashlo
);
1459 * This routine will, depending on the values passed to it,
1460 * either make it accept multicast packets, go into
1461 * promiscuous mode (for TCPDUMP and cousins) or accept
1462 * a select set of multicast packets
1464 static void bfin_mac_set_multicast_list(struct net_device
*dev
)
1468 if (dev
->flags
& IFF_PROMISC
) {
1469 netdev_info(dev
, "set promisc mode\n");
1470 sysctl
= bfin_read_EMAC_OPMODE();
1472 bfin_write_EMAC_OPMODE(sysctl
);
1473 } else if (dev
->flags
& IFF_ALLMULTI
) {
1474 /* accept all multicast */
1475 sysctl
= bfin_read_EMAC_OPMODE();
1477 bfin_write_EMAC_OPMODE(sysctl
);
1478 } else if (!netdev_mc_empty(dev
)) {
1479 /* set up multicast hash table */
1480 sysctl
= bfin_read_EMAC_OPMODE();
1482 bfin_write_EMAC_OPMODE(sysctl
);
1483 bfin_mac_multicast_hash(dev
);
1485 /* clear promisc or multicast mode */
1486 sysctl
= bfin_read_EMAC_OPMODE();
1487 sysctl
&= ~(RAF
| PAM
);
1488 bfin_write_EMAC_OPMODE(sysctl
);
1492 static int bfin_mac_ioctl(struct net_device
*netdev
, struct ifreq
*ifr
, int cmd
)
1494 struct bfin_mac_local
*lp
= netdev_priv(netdev
);
1496 if (!netif_running(netdev
))
1501 return bfin_mac_hwtstamp_ioctl(netdev
, ifr
, cmd
);
1504 return phy_mii_ioctl(lp
->phydev
, ifr
, cmd
);
/*
 * this puts the device in an inactive state
 *
 * Clears EMAC_OPMODE entirely (RE/TE and all option bits) and stops
 * both the RX (DMA1) and TX (DMA2) channels. No state is saved; the
 * open path reprograms everything from scratch.
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}
1523 * Open and Initialize the interface
1525 * Set up everything, reset the card, etc..
1527 static int bfin_mac_open(struct net_device
*dev
)
1529 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1531 pr_debug("%s: %s\n", dev
->name
, __func__
);
1534 * Check that the address is valid. If its not, refuse
1535 * to bring the device up. The user must specify an
1536 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1538 if (!is_valid_ether_addr(dev
->dev_addr
)) {
1539 netdev_warn(dev
, "no valid ethernet hw addr\n");
1543 /* initial rx and tx list */
1544 ret
= desc_list_init(dev
);
1548 phy_start(lp
->phydev
);
1549 phy_write(lp
->phydev
, MII_BMCR
, BMCR_RESET
);
1550 setup_system_regs(dev
);
1551 setup_mac_addr(dev
->dev_addr
);
1554 ret
= bfin_mac_enable(lp
->phydev
);
1557 pr_debug("hardware init finished\n");
1559 netif_start_queue(dev
);
1560 netif_carrier_on(dev
);
1566 * this makes the board clean up everything that it can
1567 * and not talk to the outside world. Caused by
1568 * an 'ifconfig ethX down'
1570 static int bfin_mac_close(struct net_device
*dev
)
1572 struct bfin_mac_local
*lp
= netdev_priv(dev
);
1573 pr_debug("%s: %s\n", dev
->name
, __func__
);
1575 netif_stop_queue(dev
);
1576 netif_carrier_off(dev
);
1578 phy_stop(lp
->phydev
);
1579 phy_write(lp
->phydev
, MII_BMCR
, BMCR_PDOWN
);
1581 /* clear everything */
1582 bfin_mac_shutdown(dev
);
1584 /* free the rx/tx buffers */
1590 static const struct net_device_ops bfin_mac_netdev_ops
= {
1591 .ndo_open
= bfin_mac_open
,
1592 .ndo_stop
= bfin_mac_close
,
1593 .ndo_start_xmit
= bfin_mac_hard_start_xmit
,
1594 .ndo_set_mac_address
= bfin_mac_set_mac_address
,
1595 .ndo_tx_timeout
= bfin_mac_timeout
,
1596 .ndo_set_rx_mode
= bfin_mac_set_multicast_list
,
1597 .ndo_do_ioctl
= bfin_mac_ioctl
,
1598 .ndo_validate_addr
= eth_validate_addr
,
1599 .ndo_change_mtu
= eth_change_mtu
,
1600 #ifdef CONFIG_NET_POLL_CONTROLLER
1601 .ndo_poll_controller
= bfin_mac_poll
,
1605 static int bfin_mac_probe(struct platform_device
*pdev
)
1607 struct net_device
*ndev
;
1608 struct bfin_mac_local
*lp
;
1609 struct platform_device
*pd
;
1610 struct bfin_mii_bus_platform_data
*mii_bus_data
;
1613 ndev
= alloc_etherdev(sizeof(struct bfin_mac_local
));
1617 SET_NETDEV_DEV(ndev
, &pdev
->dev
);
1618 platform_set_drvdata(pdev
, ndev
);
1619 lp
= netdev_priv(ndev
);
1622 /* Grab the MAC address in the MAC */
1623 *(__le32
*) (&(ndev
->dev_addr
[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
1624 *(__le16
*) (&(ndev
->dev_addr
[4])) = cpu_to_le16((u16
) bfin_read_EMAC_ADDRHI());
1627 /*todo: how to proble? which is revision_register */
1628 bfin_write_EMAC_ADDRLO(0x12345678);
1629 if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
1630 dev_err(&pdev
->dev
, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
1632 goto out_err_probe_mac
;
1637 * Is it valid? (Did bootloader initialize it?)
1638 * Grab the MAC from the board somehow
1639 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
1641 if (!is_valid_ether_addr(ndev
->dev_addr
)) {
1642 if (bfin_get_ether_addr(ndev
->dev_addr
) ||
1643 !is_valid_ether_addr(ndev
->dev_addr
)) {
1644 /* Still not valid, get a random one */
1645 netdev_warn(ndev
, "Setting Ethernet MAC to a random one\n");
1646 eth_hw_addr_random(ndev
);
1650 setup_mac_addr(ndev
->dev_addr
);
1652 if (!pdev
->dev
.platform_data
) {
1653 dev_err(&pdev
->dev
, "Cannot get platform device bfin_mii_bus!\n");
1655 goto out_err_probe_mac
;
1657 pd
= pdev
->dev
.platform_data
;
1658 lp
->mii_bus
= platform_get_drvdata(pd
);
1660 dev_err(&pdev
->dev
, "Cannot get mii_bus!\n");
1662 goto out_err_probe_mac
;
1664 lp
->mii_bus
->priv
= ndev
;
1665 mii_bus_data
= pd
->dev
.platform_data
;
1667 rc
= mii_probe(ndev
, mii_bus_data
->phy_mode
);
1669 dev_err(&pdev
->dev
, "MII Probe failed!\n");
1670 goto out_err_mii_probe
;
1673 lp
->vlan1_mask
= ETH_P_8021Q
| mii_bus_data
->vlan1_mask
;
1674 lp
->vlan2_mask
= ETH_P_8021Q
| mii_bus_data
->vlan2_mask
;
1676 /* Fill in the fields of the device structure with ethernet values. */
1679 ndev
->netdev_ops
= &bfin_mac_netdev_ops
;
1680 ndev
->ethtool_ops
= &bfin_mac_ethtool_ops
;
1682 init_timer(&lp
->tx_reclaim_timer
);
1683 lp
->tx_reclaim_timer
.data
= (unsigned long)lp
;
1684 lp
->tx_reclaim_timer
.function
= tx_reclaim_skb_timeout
;
1686 spin_lock_init(&lp
->lock
);
1688 /* now, enable interrupts */
1689 /* register irq handler */
1690 rc
= request_irq(IRQ_MAC_RX
, bfin_mac_interrupt
,
1691 IRQF_DISABLED
, "EMAC_RX", ndev
);
1693 dev_err(&pdev
->dev
, "Cannot request Blackfin MAC RX IRQ!\n");
1695 goto out_err_request_irq
;
1698 rc
= register_netdev(ndev
);
1700 dev_err(&pdev
->dev
, "Cannot register net device!\n");
1701 goto out_err_reg_ndev
;
1704 bfin_mac_hwtstamp_init(ndev
);
1705 if (bfin_phc_init(ndev
, &pdev
->dev
)) {
1706 dev_err(&pdev
->dev
, "Cannot register PHC device!\n");
1710 /* now, print out the card info, in a short format.. */
1711 netdev_info(ndev
, "%s, Version %s\n", DRV_DESC
, DRV_VERSION
);
1717 free_irq(IRQ_MAC_RX
, ndev
);
1718 out_err_request_irq
:
1720 mdiobus_unregister(lp
->mii_bus
);
1721 mdiobus_free(lp
->mii_bus
);
1723 platform_set_drvdata(pdev
, NULL
);
1729 static int bfin_mac_remove(struct platform_device
*pdev
)
1731 struct net_device
*ndev
= platform_get_drvdata(pdev
);
1732 struct bfin_mac_local
*lp
= netdev_priv(ndev
);
1734 bfin_phc_release(lp
);
1736 platform_set_drvdata(pdev
, NULL
);
1738 lp
->mii_bus
->priv
= NULL
;
1740 unregister_netdev(ndev
);
1742 free_irq(IRQ_MAC_RX
, ndev
);
1750 static int bfin_mac_suspend(struct platform_device
*pdev
, pm_message_t mesg
)
1752 struct net_device
*net_dev
= platform_get_drvdata(pdev
);
1753 struct bfin_mac_local
*lp
= netdev_priv(net_dev
);
1756 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE
) | RE
);
1757 bfin_write_EMAC_WKUP_CTL(MPKE
);
1758 enable_irq_wake(IRQ_MAC_WAKEDET
);
1760 if (netif_running(net_dev
))
1761 bfin_mac_close(net_dev
);
1767 static int bfin_mac_resume(struct platform_device
*pdev
)
1769 struct net_device
*net_dev
= platform_get_drvdata(pdev
);
1770 struct bfin_mac_local
*lp
= netdev_priv(net_dev
);
1773 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE
);
1774 bfin_write_EMAC_WKUP_CTL(0);
1775 disable_irq_wake(IRQ_MAC_WAKEDET
);
1777 if (netif_running(net_dev
))
1778 bfin_mac_open(net_dev
);
1784 #define bfin_mac_suspend NULL
1785 #define bfin_mac_resume NULL
1786 #endif /* CONFIG_PM */
1788 static int bfin_mii_bus_probe(struct platform_device
*pdev
)
1790 struct mii_bus
*miibus
;
1791 struct bfin_mii_bus_platform_data
*mii_bus_pd
;
1792 const unsigned short *pin_req
;
1795 mii_bus_pd
= dev_get_platdata(&pdev
->dev
);
1797 dev_err(&pdev
->dev
, "No peripherals in platform data!\n");
1802 * We are setting up a network card,
1803 * so set the GPIO pins to Ethernet mode
1805 pin_req
= mii_bus_pd
->mac_peripherals
;
1806 rc
= peripheral_request_list(pin_req
, KBUILD_MODNAME
);
1808 dev_err(&pdev
->dev
, "Requesting peripherals failed!\n");
1813 miibus
= mdiobus_alloc();
1816 miibus
->read
= bfin_mdiobus_read
;
1817 miibus
->write
= bfin_mdiobus_write
;
1818 miibus
->reset
= bfin_mdiobus_reset
;
1820 miibus
->parent
= &pdev
->dev
;
1821 miibus
->name
= "bfin_mii_bus";
1822 miibus
->phy_mask
= mii_bus_pd
->phy_mask
;
1824 snprintf(miibus
->id
, MII_BUS_ID_SIZE
, "%s-%x",
1825 pdev
->name
, pdev
->id
);
1826 miibus
->irq
= kmalloc(sizeof(int)*PHY_MAX_ADDR
, GFP_KERNEL
);
1828 goto out_err_irq_alloc
;
1830 for (i
= rc
; i
< PHY_MAX_ADDR
; ++i
)
1831 miibus
->irq
[i
] = PHY_POLL
;
1833 rc
= clamp(mii_bus_pd
->phydev_number
, 0, PHY_MAX_ADDR
);
1834 if (rc
!= mii_bus_pd
->phydev_number
)
1835 dev_err(&pdev
->dev
, "Invalid number (%i) of phydevs\n",
1836 mii_bus_pd
->phydev_number
);
1837 for (i
= 0; i
< rc
; ++i
) {
1838 unsigned short phyaddr
= mii_bus_pd
->phydev_data
[i
].addr
;
1839 if (phyaddr
< PHY_MAX_ADDR
)
1840 miibus
->irq
[phyaddr
] = mii_bus_pd
->phydev_data
[i
].irq
;
1843 "Invalid PHY address %i for phydev %i\n",
1847 rc
= mdiobus_register(miibus
);
1849 dev_err(&pdev
->dev
, "Cannot register MDIO bus!\n");
1850 goto out_err_mdiobus_register
;
1853 platform_set_drvdata(pdev
, miibus
);
1856 out_err_mdiobus_register
:
1859 mdiobus_free(miibus
);
1861 peripheral_free_list(pin_req
);
1866 static int bfin_mii_bus_remove(struct platform_device
*pdev
)
1868 struct mii_bus
*miibus
= platform_get_drvdata(pdev
);
1869 struct bfin_mii_bus_platform_data
*mii_bus_pd
=
1870 dev_get_platdata(&pdev
->dev
);
1872 platform_set_drvdata(pdev
, NULL
);
1873 mdiobus_unregister(miibus
);
1875 mdiobus_free(miibus
);
1876 peripheral_free_list(mii_bus_pd
->mac_peripherals
);
1881 static struct platform_driver bfin_mii_bus_driver
= {
1882 .probe
= bfin_mii_bus_probe
,
1883 .remove
= bfin_mii_bus_remove
,
1885 .name
= "bfin_mii_bus",
1886 .owner
= THIS_MODULE
,
1890 static struct platform_driver bfin_mac_driver
= {
1891 .probe
= bfin_mac_probe
,
1892 .remove
= bfin_mac_remove
,
1893 .resume
= bfin_mac_resume
,
1894 .suspend
= bfin_mac_suspend
,
1896 .name
= KBUILD_MODNAME
,
1897 .owner
= THIS_MODULE
,
1901 static int __init
bfin_mac_init(void)
1904 ret
= platform_driver_register(&bfin_mii_bus_driver
);
1906 return platform_driver_register(&bfin_mac_driver
);
1910 module_init(bfin_mac_init
);
/* Module exit: unregister the two platform drivers in reverse order of
 * registration (MAC first, then the MII bus it depends on).
 */
static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
	platform_driver_unregister(&bfin_mii_bus_driver);
}

module_exit(bfin_mac_cleanup);