1 /*
2 * Micrel KS8695 (Centaur) Ethernet.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * Copyright 2008 Simtec Electronics
15 * Daniel Silverstone <dsilvers@simtec.co.uk>
16 * Vincent Sanders <vince@simtec.co.uk>
17 */
18
19 #include <linux/dma-mapping.h>
20 #include <linux/module.h>
21 #include <linux/ioport.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/skbuff.h>
27 #include <linux/spinlock.h>
28 #include <linux/crc32.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/platform_device.h>
33 #include <linux/irq.h>
34 #include <linux/io.h>
35 #include <linux/slab.h>
36
37 #include <asm/irq.h>
38
39 #include <mach/regs-switch.h>
40 #include <mach/regs-misc.h>
41 #include <asm/mach/irq.h>
42 #include <mach/regs-irq.h>
43
44 #include "ks8695net.h"
45
46 #define MODULENAME "ks8695_ether"
47 #define MODULEVERSION "1.02"
48
49 /*
50 * Transmit and device reset timeout, default 5 seconds.
51 */
52 static int watchdog = 5000;
53
54 /* Hardware structures */
55
56 /**
57 * struct rx_ring_desc - Receive descriptor ring element
58 * @status: The status of the descriptor element (E.g. who owns it)
59 * @length: The number of bytes in the block pointed to by data_ptr
60 * @data_ptr: The physical address of the data block to receive into
61 * @next_desc: The physical address of the next descriptor element.
62 */
63 struct rx_ring_desc {
64 __le32 status;
65 __le32 length;
66 __le32 data_ptr;
67 __le32 next_desc;
68 };
69
70 /**
71 * struct tx_ring_desc - Transmit descriptor ring element
72 * @owner: Who owns the descriptor
73  * @status: The control flags and length of the block pointed to by data_ptr
74  * @data_ptr: The physical address of the data block to transmit
75 * @next_desc: The physical address of the next descriptor element.
76 */
77 struct tx_ring_desc {
78 __le32 owner;
79 __le32 status;
80 __le32 data_ptr;
81 __le32 next_desc;
82 };
83
84 /**
85 * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
86 * @skb: The buffer in the ring
87 * @dma_ptr: The mapped DMA pointer of the buffer
88 * @length: The number of bytes mapped to dma_ptr
89 */
90 struct ks8695_skbuff {
91 struct sk_buff *skb;
92 dma_addr_t dma_ptr;
93 u32 length;
94 };
95
96 /* Private device structure */
97
98 #define MAX_TX_DESC 8
99 #define MAX_TX_DESC_MASK 0x7
100 #define MAX_RX_DESC 16
101 #define MAX_RX_DESC_MASK 0xf
102
103 /* The NAPI weight should be larger than the number of RX DMA buffers */
104 #define NAPI_WEIGHT 64
105
106 #define MAX_RXBUF_SIZE 0x700
107
108 #define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
109 #define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
110 #define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
111
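/*
 * Illustrative aside, not part of the original driver: the ring indices in
 * this file wrap with "& MAX_TX_DESC_MASK" / "& MAX_RX_DESC_MASK", which is
 * only correct while the ring sizes stay powers of two and each mask is
 * (size - 1).  A hedged sketch of compile-time checks capturing that
 * assumption (BUILD_BUG_ON is provided by <linux/bug.h>, which may need
 * adding to the includes above):
 */
static inline void ks8695_check_ring_geometry(void)
{
	/* ring sizes must be powers of two for the mask-based wrapping */
	BUILD_BUG_ON(MAX_TX_DESC & (MAX_TX_DESC - 1));
	BUILD_BUG_ON(MAX_RX_DESC & (MAX_RX_DESC - 1));
	/* each mask must match its ring size */
	BUILD_BUG_ON(MAX_TX_DESC_MASK != MAX_TX_DESC - 1);
	BUILD_BUG_ON(MAX_RX_DESC_MASK != MAX_RX_DESC - 1);
}
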
112 /**
113 * enum ks8695_dtype - Device type
114 * @KS8695_DTYPE_WAN: This device is a WAN interface
115 * @KS8695_DTYPE_LAN: This device is a LAN interface
116 * @KS8695_DTYPE_HPNA: This device is an HPNA interface
117 */
118 enum ks8695_dtype {
119 KS8695_DTYPE_WAN,
120 KS8695_DTYPE_LAN,
121 KS8695_DTYPE_HPNA,
122 };
123
124 /**
125 * struct ks8695_priv - Private data for the KS8695 Ethernet
126 * @in_suspend: Flag to indicate if we're suspending/resuming
127 * @ndev: The net_device for this interface
128 * @dev: The platform device object for this interface
129 * @dtype: The type of this device
130 * @io_regs: The ioremapped registers for this interface
131  * @napi: The NAPI structure used for RX packet processing
132 * @rx_irq_name: The textual name of the RX IRQ from the platform data
133 * @tx_irq_name: The textual name of the TX IRQ from the platform data
134 * @link_irq_name: The textual name of the link IRQ from the
135 * platform data if available
136 * @rx_irq: The IRQ number for the RX IRQ
137 * @tx_irq: The IRQ number for the TX IRQ
138 * @link_irq: The IRQ number for the link IRQ if available
139 * @regs_req: The resource request for the registers region
140 * @phyiface_req: The resource request for the phy/switch region
141 * if available
142 * @phyiface_regs: The ioremapped registers for the phy/switch if available
143 * @ring_base: The base pointer of the dma coherent memory for the rings
144 * @ring_base_dma: The DMA mapped equivalent of ring_base
145 * @tx_ring: The pointer in ring_base of the TX ring
146 * @tx_ring_used: The number of slots in the TX ring which are occupied
147 * @tx_ring_next_slot: The next slot to fill in the TX ring
148 * @tx_ring_dma: The DMA mapped equivalent of tx_ring
149 * @tx_buffers: The sk_buff mappings for the TX ring
150  * @txq_lock: A lock to protect the tx_buffers, tx_ring_used etc. variables
151 * @rx_ring: The pointer in ring_base of the RX ring
152 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
153 * @rx_buffers: The sk_buff mappings for the RX ring
154 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
155  * @rx_lock: A lock to protect the RX IRQ and NAPI poll handling
156 * @msg_enable: The flags for which messages to emit
157 */
158 struct ks8695_priv {
159 int in_suspend;
160 struct net_device *ndev;
161 struct device *dev;
162 enum ks8695_dtype dtype;
163 void __iomem *io_regs;
164
165 struct napi_struct napi;
166
167 const char *rx_irq_name, *tx_irq_name, *link_irq_name;
168 int rx_irq, tx_irq, link_irq;
169
170 struct resource *regs_req, *phyiface_req;
171 void __iomem *phyiface_regs;
172
173 void *ring_base;
174 dma_addr_t ring_base_dma;
175
176 struct tx_ring_desc *tx_ring;
177 int tx_ring_used;
178 int tx_ring_next_slot;
179 dma_addr_t tx_ring_dma;
180 struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
181 spinlock_t txq_lock;
182
183 struct rx_ring_desc *rx_ring;
184 dma_addr_t rx_ring_dma;
185 struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
186 int next_rx_desc_read;
187 spinlock_t rx_lock;
188
189 int msg_enable;
190 };
191
192 /* Register access */
193
194 /**
195 * ks8695_readreg - Read from a KS8695 ethernet register
196 * @ksp: The device to read from
197 * @reg: The register to read
198 */
199 static inline u32
200 ks8695_readreg(struct ks8695_priv *ksp, int reg)
201 {
202 return readl(ksp->io_regs + reg);
203 }
204
205 /**
206 * ks8695_writereg - Write to a KS8695 ethernet register
207 * @ksp: The device to write to
208 * @reg: The register to write
209 * @value: The value to write to the register
210 */
211 static inline void
212 ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
213 {
214 writel(value, ksp->io_regs + reg);
215 }
216
217 /* Utility functions */
218
219 /**
220 * ks8695_port_type - Retrieve port-type as user-friendly string
221 * @ksp: The device to return the type for
222 *
223 * Returns a string indicating which of the WAN, LAN or HPNA
224 * ports this device is likely to represent.
225 */
226 static const char *
227 ks8695_port_type(struct ks8695_priv *ksp)
228 {
229 switch (ksp->dtype) {
230 case KS8695_DTYPE_LAN:
231 return "LAN";
232 case KS8695_DTYPE_WAN:
233 return "WAN";
234 case KS8695_DTYPE_HPNA:
235 return "HPNA";
236 }
237
238 return "UNKNOWN";
239 }
240
241 /**
242 * ks8695_update_mac - Update the MAC registers in the device
243 * @ksp: The device to update
244 *
245 * Updates the MAC registers in the KS8695 device from the address in the
246 * net_device structure associated with this interface.
247 */
248 static void
249 ks8695_update_mac(struct ks8695_priv *ksp)
250 {
251 /* Update the HW with the MAC from the net_device */
252 struct net_device *ndev = ksp->ndev;
253 u32 machigh, maclow;
254
255 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
256 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
257 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
258
259 ks8695_writereg(ksp, KS8695_MAL, maclow);
260 ks8695_writereg(ksp, KS8695_MAH, machigh);
261
262 }
263
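/*
 * Worked example (illustrative only, not from the original sources): for a
 * MAC address of 00:11:22:33:44:55 the packing above yields
 * KS8695_MAH = 0x00000011 and KS8695_MAL = 0x22334455, i.e. the two most
 * significant octets go to MAH and the remaining four to MAL.  A hedged
 * helper expressing the same packing for any six-byte address:
 */
static inline void ks8695_pack_mac(const u8 *addr, u32 *machigh, u32 *maclow)
{
	/* bytes 2..5 fill the low register, most significant first */
	*maclow = (addr[2] << 24) | (addr[3] << 16) |
		  (addr[4] << 8) | (addr[5] << 0);
	/* bytes 0..1 fill the low half of the high register */
	*machigh = (addr[0] << 8) | (addr[1] << 0);
}
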
264 /**
265 * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
266 * @ksp: The device to refill
267 *
268 * Iterates the RX ring of the device looking for empty slots.
269 * For each empty slot, we allocate and map a new SKB and give it
270 * to the hardware.
271 * This can be called from interrupt context safely.
272 */
273 static void
274 ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
275 {
276 /* Run around the RX ring, filling in any missing sk_buff's */
277 int buff_n;
278
279 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
280 if (!ksp->rx_buffers[buff_n].skb) {
281 struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
282 dma_addr_t mapping;
283
284 ksp->rx_buffers[buff_n].skb = skb;
285 if (skb == NULL) {
286 /* Failed to allocate one, perhaps
287 * we'll try again later.
288 */
289 break;
290 }
291
292 mapping = dma_map_single(ksp->dev, skb->data,
293 MAX_RXBUF_SIZE,
294 DMA_FROM_DEVICE);
295 if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
296 /* Failed to DMA map this SKB, try later */
297 dev_kfree_skb_irq(skb);
298 ksp->rx_buffers[buff_n].skb = NULL;
299 break;
300 }
301 ksp->rx_buffers[buff_n].dma_ptr = mapping;
302 skb->dev = ksp->ndev;
303 ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
304
305 /* Record this into the DMA ring */
306 ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
307 ksp->rx_ring[buff_n].length =
308 cpu_to_le32(MAX_RXBUF_SIZE);
309
310 wmb();
311
312 /* And give ownership over to the hardware */
313 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
314 }
315 }
316 }
317
318 /* Maximum number of multicast addresses which the KS8695 HW supports */
319 #define KS8695_NR_ADDRESSES 16
320
321 /**
322 * ks8695_init_partial_multicast - Init the mcast addr registers
323 * @ksp: The device to initialise
324  * @ndev: The net_device whose multicast address list is used to
325  *        program the additional-address registers
326 *
327 * This routine is a helper for ks8695_set_multicast - it writes
328 * the additional-address registers in the KS8695 ethernet device
329 * and cleans up any others left behind.
330 */
331 static void
332 ks8695_init_partial_multicast(struct ks8695_priv *ksp,
333 struct net_device *ndev)
334 {
335 u32 low, high;
336 int i;
337 struct netdev_hw_addr *ha;
338
339 i = 0;
340 netdev_for_each_mc_addr(ha, ndev) {
341 /* Ran out of space in chip? */
342 BUG_ON(i == KS8695_NR_ADDRESSES);
343
344 low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
345 (ha->addr[4] << 8) | (ha->addr[5]);
346 high = (ha->addr[0] << 8) | (ha->addr[1]);
347
348 ks8695_writereg(ksp, KS8695_AAL_(i), low);
349 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
350 i++;
351 }
352
353 /* Clear the remaining Additional Station Addresses */
354 for (; i < KS8695_NR_ADDRESSES; i++) {
355 ks8695_writereg(ksp, KS8695_AAL_(i), 0);
356 ks8695_writereg(ksp, KS8695_AAH_(i), 0);
357 }
358 }
359
360 /* Interrupt handling */
361
362 /**
363 * ks8695_tx_irq - Transmit IRQ handler
364 * @irq: The IRQ which went off (ignored)
365 * @dev_id: The net_device for the interrupt
366 *
367 * Process the TX ring, clearing out any transmitted slots.
368 * Allows the net_device to pass us new packets once slots are
369 * freed.
370 */
371 static irqreturn_t
372 ks8695_tx_irq(int irq, void *dev_id)
373 {
374 struct net_device *ndev = (struct net_device *)dev_id;
375 struct ks8695_priv *ksp = netdev_priv(ndev);
376 int buff_n;
377
378 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
379 if (ksp->tx_buffers[buff_n].skb &&
380 !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
381 rmb();
382 /* An SKB which is not owned by HW is present */
383 /* Update the stats for the net_device */
384 ndev->stats.tx_packets++;
385 ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;
386
387 /* Free the packet from the ring */
388 ksp->tx_ring[buff_n].data_ptr = 0;
389
390 /* Free the sk_buff */
391 dma_unmap_single(ksp->dev,
392 ksp->tx_buffers[buff_n].dma_ptr,
393 ksp->tx_buffers[buff_n].length,
394 DMA_TO_DEVICE);
395 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
396 ksp->tx_buffers[buff_n].skb = NULL;
397 ksp->tx_ring_used--;
398 }
399 }
400
401 netif_wake_queue(ndev);
402
403 return IRQ_HANDLED;
404 }
405
406 /**
407 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
408 * @ksp: Private data for the KS8695 Ethernet
409 *
410  * From the KS8695 documentation:
411 * Interrupt Enable Register (offset 0xE204)
412 * Bit29 : WAN MAC Receive Interrupt Enable
413 * Bit16 : LAN MAC Receive Interrupt Enable
414 * Interrupt Status Register (Offset 0xF208)
415 * Bit29: WAN MAC Receive Status
416 * Bit16: LAN MAC Receive Status
417  * So the Rx interrupt enable/status bit number is the same
418  * as the Rx IRQ number.
419 */
420 static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
421 {
422 return ksp->rx_irq;
423 }
424
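/*
 * Hedged sketch, not part of the original driver: the RX interrupt enable
 * bit returned above is used twice below (in ks8695_rx_irq() and
 * ks8695_poll()) in an open-coded read-modify-write of the global interrupt
 * enable register.  Helpers along these lines would capture the pattern;
 * KS8695_IRQ_VA and KS8695_INTEN come from the mach headers already
 * included in this file.
 */
static inline void ks8695_mask_rx_irq(struct ks8695_priv *ksp)
{
	unsigned long mask = readl(KS8695_IRQ_VA + KS8695_INTEN);

	/* clear the RX enable bit for this port */
	writel(mask & ~(1 << ks8695_get_rx_enable_bit(ksp)),
	       KS8695_IRQ_VA + KS8695_INTEN);
}

static inline void ks8695_unmask_rx_irq(struct ks8695_priv *ksp)
{
	unsigned long mask = readl(KS8695_IRQ_VA + KS8695_INTEN);

	/* set the RX enable bit for this port again */
	writel(mask | (1 << ks8695_get_rx_enable_bit(ksp)),
	       KS8695_IRQ_VA + KS8695_INTEN);
}
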
425 /**
426 * ks8695_rx_irq - Receive IRQ handler
427 * @irq: The IRQ which went off (ignored)
428 * @dev_id: The net_device for the interrupt
429 *
430 * Inform NAPI that packet reception needs to be scheduled
431 */
432
433 static irqreturn_t
434 ks8695_rx_irq(int irq, void *dev_id)
435 {
436 struct net_device *ndev = (struct net_device *)dev_id;
437 struct ks8695_priv *ksp = netdev_priv(ndev);
438
439 spin_lock(&ksp->rx_lock);
440
441 if (napi_schedule_prep(&ksp->napi)) {
442 unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
443 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
444 /*disable rx interrupt*/
445 status &= ~mask_bit;
446 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
447 __napi_schedule(&ksp->napi);
448 }
449
450 spin_unlock(&ksp->rx_lock);
451 return IRQ_HANDLED;
452 }
453
454 /**
455 * ks8695_rx - Receive packets called by NAPI poll method
456 * @ksp: Private data for the KS8695 Ethernet
457 * @budget: Number of packets allowed to process
458 */
459 static int ks8695_rx(struct ks8695_priv *ksp, int budget)
460 {
461 struct net_device *ndev = ksp->ndev;
462 struct sk_buff *skb;
463 int buff_n;
464 u32 flags;
465 int pktlen;
466 int received = 0;
467
468 buff_n = ksp->next_rx_desc_read;
469 while (received < budget
470 && ksp->rx_buffers[buff_n].skb
471 && (!(ksp->rx_ring[buff_n].status &
472 cpu_to_le32(RDES_OWN)))) {
473 rmb();
474 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
475
476 /* Found an SKB which we own, this means we
477 * received a packet
478 */
479 if ((flags & (RDES_FS | RDES_LS)) !=
480 (RDES_FS | RDES_LS)) {
481                         /* This packet is not both the first
482                          * and the last segment, so it is a
483                          * "spanning" packet which we can't
484                          * handle
485 */
486 goto rx_failure;
487 }
488
489 if (flags & (RDES_ES | RDES_RE)) {
490 /* It's an error packet */
491 ndev->stats.rx_errors++;
492 if (flags & RDES_TL)
493 ndev->stats.rx_length_errors++;
494 if (flags & RDES_RF)
495 ndev->stats.rx_length_errors++;
496 if (flags & RDES_CE)
497 ndev->stats.rx_crc_errors++;
498 if (flags & RDES_RE)
499 ndev->stats.rx_missed_errors++;
500
501 goto rx_failure;
502 }
503
504 pktlen = flags & RDES_FLEN;
505 pktlen -= 4; /* Drop the CRC */
506
507 /* Retrieve the sk_buff */
508 skb = ksp->rx_buffers[buff_n].skb;
509
510 /* Clear it from the ring */
511 ksp->rx_buffers[buff_n].skb = NULL;
512 ksp->rx_ring[buff_n].data_ptr = 0;
513
514 /* Unmap the SKB */
515 dma_unmap_single(ksp->dev,
516 ksp->rx_buffers[buff_n].dma_ptr,
517 ksp->rx_buffers[buff_n].length,
518 DMA_FROM_DEVICE);
519
520 /* Relinquish the SKB to the network layer */
521 skb_put(skb, pktlen);
522 skb->protocol = eth_type_trans(skb, ndev);
523 netif_receive_skb(skb);
524
525 /* Record stats */
526 ndev->stats.rx_packets++;
527 ndev->stats.rx_bytes += pktlen;
528 goto rx_finished;
529
530 rx_failure:
531 /* This ring entry is an error, but we can
532 * re-use the skb
533 */
534 /* Give the ring entry back to the hardware */
535 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
536 rx_finished:
537 received++;
538 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
539 }
540
541 /* And note which RX descriptor we last did */
542 ksp->next_rx_desc_read = buff_n;
543
544 /* And refill the buffers */
545 ks8695_refill_rxbuffers(ksp);
546
547 /* Kick the RX DMA engine, in case it became suspended */
548 ks8695_writereg(ksp, KS8695_DRSC, 0);
549
550 return received;
551 }
552
553
554 /**
555  * ks8695_poll - Receive packets via the NAPI poll method
556  * @napi: The NAPI structure embedded in our private data
557  * @budget: The maximum number of packets to process in this poll
558  *
559  * Invoked by the network core when it wants the driver to
560  * deliver more received packets
561 */
562 static int ks8695_poll(struct napi_struct *napi, int budget)
563 {
564 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
565 unsigned long work_done;
566
567 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
568 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
569
570 work_done = ks8695_rx(ksp, budget);
571
572 if (work_done < budget) {
573 unsigned long flags;
574 spin_lock_irqsave(&ksp->rx_lock, flags);
575 __napi_complete(napi);
576 /*enable rx interrupt*/
577 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
578 spin_unlock_irqrestore(&ksp->rx_lock, flags);
579 }
580 return work_done;
581 }
582
583 /**
584 * ks8695_link_irq - Link change IRQ handler
585 * @irq: The IRQ which went off (ignored)
586 * @dev_id: The net_device for the interrupt
587 *
588  * The WAN interface can generate an IRQ when the link changes;
589  * report this to the net layer and the user.
590 */
591 static irqreturn_t
592 ks8695_link_irq(int irq, void *dev_id)
593 {
594 struct net_device *ndev = (struct net_device *)dev_id;
595 struct ks8695_priv *ksp = netdev_priv(ndev);
596 u32 ctrl;
597
598 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
599 if (ctrl & WMC_WLS) {
600 netif_carrier_on(ndev);
601 if (netif_msg_link(ksp))
602 dev_info(ksp->dev,
603 "%s: Link is now up (10%sMbps/%s-duplex)\n",
604 ndev->name,
605 (ctrl & WMC_WSS) ? "0" : "",
606 (ctrl & WMC_WDS) ? "Full" : "Half");
607 } else {
608 netif_carrier_off(ndev);
609 if (netif_msg_link(ksp))
610 dev_info(ksp->dev, "%s: Link is now down.\n",
611 ndev->name);
612 }
613
614 return IRQ_HANDLED;
615 }
616
617
618 /* KS8695 Device functions */
619
620 /**
621 * ks8695_reset - Reset a KS8695 ethernet interface
622 * @ksp: The interface to reset
623 *
624 * Perform an engine reset of the interface and re-program it
625 * with sensible defaults.
626 */
627 static void
628 ks8695_reset(struct ks8695_priv *ksp)
629 {
630 int reset_timeout = watchdog;
631 /* Issue the reset via the TX DMA control register */
632 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
633 while (reset_timeout--) {
634 if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
635 break;
636 msleep(1);
637 }
638
639 if (reset_timeout < 0) {
640 dev_crit(ksp->dev,
641 "Timeout waiting for DMA engines to reset\n");
642 /* And blithely carry on */
643 }
644
645 /* Definitely wait long enough before attempting to program
646 * the engines
647 */
648 msleep(10);
649
650 /* RX: unicast and broadcast */
651 ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
652 /* TX: pad and add CRC */
653 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
654 }
655
656 /**
657 * ks8695_shutdown - Shut down a KS8695 ethernet interface
658 * @ksp: The interface to shut down
659 *
660 * This disables packet RX/TX, cleans up IRQs, drains the rings,
661 * and basically places the interface into a clean shutdown
662 * state.
663 */
664 static void
665 ks8695_shutdown(struct ks8695_priv *ksp)
666 {
667 u32 ctrl;
668 int buff_n;
669
670 /* Disable packet transmission */
671 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
672 ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
673
674 /* Disable packet reception */
675 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
676 ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
677
678 /* Release the IRQs */
679 free_irq(ksp->rx_irq, ksp->ndev);
680 free_irq(ksp->tx_irq, ksp->ndev);
681 if (ksp->link_irq != -1)
682 free_irq(ksp->link_irq, ksp->ndev);
683
684 /* Throw away any pending TX packets */
685 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
686 if (ksp->tx_buffers[buff_n].skb) {
687 /* Remove this SKB from the TX ring */
688 ksp->tx_ring[buff_n].owner = 0;
689 ksp->tx_ring[buff_n].status = 0;
690 ksp->tx_ring[buff_n].data_ptr = 0;
691
692 /* Unmap and bin this SKB */
693 dma_unmap_single(ksp->dev,
694 ksp->tx_buffers[buff_n].dma_ptr,
695 ksp->tx_buffers[buff_n].length,
696 DMA_TO_DEVICE);
697 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
698 ksp->tx_buffers[buff_n].skb = NULL;
699 }
700 }
701
702 /* Purge the RX buffers */
703 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
704 if (ksp->rx_buffers[buff_n].skb) {
705 /* Remove the SKB from the RX ring */
706 ksp->rx_ring[buff_n].status = 0;
707 ksp->rx_ring[buff_n].data_ptr = 0;
708
709 /* Unmap and bin the SKB */
710 dma_unmap_single(ksp->dev,
711 ksp->rx_buffers[buff_n].dma_ptr,
712 ksp->rx_buffers[buff_n].length,
713 DMA_FROM_DEVICE);
714 dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
715 ksp->rx_buffers[buff_n].skb = NULL;
716 }
717 }
718 }
719
720
721 /**
722 * ks8695_setup_irq - IRQ setup helper function
723 * @irq: The IRQ number to claim
724 * @irq_name: The name to give the IRQ claimant
725 * @handler: The function to call to handle the IRQ
726 * @ndev: The net_device to pass in as the dev_id argument to the handler
727 *
728 * Return 0 on success.
729 */
730 static int
731 ks8695_setup_irq(int irq, const char *irq_name,
732 irq_handler_t handler, struct net_device *ndev)
733 {
734 int ret;
735
736 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
737
738 if (ret) {
739 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
740 return ret;
741 }
742
743 return 0;
744 }
745
746 /**
747 * ks8695_init_net - Initialise a KS8695 ethernet interface
748 * @ksp: The interface to initialise
749 *
750 * This routine fills the RX ring, initialises the DMA engines,
751 * allocates the IRQs and then starts the packet TX and RX
752 * engines.
753 */
754 static int
755 ks8695_init_net(struct ks8695_priv *ksp)
756 {
757 int ret;
758 u32 ctrl;
759
760 ks8695_refill_rxbuffers(ksp);
761
762 /* Initialise the DMA engines */
763 ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
764 ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
765
766 /* Request the IRQs */
767 ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
768 ks8695_rx_irq, ksp->ndev);
769 if (ret)
770 return ret;
771 ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
772 ks8695_tx_irq, ksp->ndev);
773 if (ret)
774 return ret;
775 if (ksp->link_irq != -1) {
776 ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
777 ks8695_link_irq, ksp->ndev);
778 if (ret)
779 return ret;
780 }
781
782 /* Set up the ring indices */
783 ksp->next_rx_desc_read = 0;
784 ksp->tx_ring_next_slot = 0;
785 ksp->tx_ring_used = 0;
786
787 /* Bring up transmission */
788 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
789 /* Enable packet transmission */
790 ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
791
792 /* Bring up the reception */
793 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
794 /* Enable packet reception */
795 ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
796 /* And start the DMA engine */
797 ks8695_writereg(ksp, KS8695_DRSC, 0);
798
799 /* All done */
800 return 0;
801 }
802
803 /**
804 * ks8695_release_device - HW resource release for KS8695 e-net
805 * @ksp: The device to be freed
806 *
807  * This releases the I/O memory regions, DMA-coherent memory etc.
808  * which were allocated in ks8695_probe.
809 */
810 static void
811 ks8695_release_device(struct ks8695_priv *ksp)
812 {
813 /* Unmap the registers */
814 iounmap(ksp->io_regs);
815 if (ksp->phyiface_regs)
816 iounmap(ksp->phyiface_regs);
817
818 /* And release the request */
819 release_resource(ksp->regs_req);
820 kfree(ksp->regs_req);
821 if (ksp->phyiface_req) {
822 release_resource(ksp->phyiface_req);
823 kfree(ksp->phyiface_req);
824 }
825
826 /* Free the ring buffers */
827 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
828 ksp->ring_base, ksp->ring_base_dma);
829 }
830
831 /* Ethtool support */
832
833 /**
834 * ks8695_get_msglevel - Get the messages enabled for emission
835 * @ndev: The network device to read from
836 */
837 static u32
838 ks8695_get_msglevel(struct net_device *ndev)
839 {
840 struct ks8695_priv *ksp = netdev_priv(ndev);
841
842 return ksp->msg_enable;
843 }
844
845 /**
846 * ks8695_set_msglevel - Set the messages enabled for emission
847 * @ndev: The network device to configure
848 * @value: The messages to set for emission
849 */
850 static void
851 ks8695_set_msglevel(struct net_device *ndev, u32 value)
852 {
853 struct ks8695_priv *ksp = netdev_priv(ndev);
854
855 ksp->msg_enable = value;
856 }
857
858 /**
859 * ks8695_wan_get_settings - Get device-specific settings.
860 * @ndev: The network device to read settings from
861 * @cmd: The ethtool structure to read into
862 */
863 static int
864 ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
865 {
866 struct ks8695_priv *ksp = netdev_priv(ndev);
867 u32 ctrl;
868
869 /* All ports on the KS8695 support these... */
870 cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
871 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
872 SUPPORTED_TP | SUPPORTED_MII);
873 cmd->transceiver = XCVR_INTERNAL;
874
875 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
876 cmd->port = PORT_MII;
877 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
878 cmd->phy_address = 0;
879
880 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
881 if ((ctrl & WMC_WAND) == 0) {
882 /* auto-negotiation is enabled */
883 cmd->advertising |= ADVERTISED_Autoneg;
884 if (ctrl & WMC_WANA100F)
885 cmd->advertising |= ADVERTISED_100baseT_Full;
886 if (ctrl & WMC_WANA100H)
887 cmd->advertising |= ADVERTISED_100baseT_Half;
888 if (ctrl & WMC_WANA10F)
889 cmd->advertising |= ADVERTISED_10baseT_Full;
890 if (ctrl & WMC_WANA10H)
891 cmd->advertising |= ADVERTISED_10baseT_Half;
892 if (ctrl & WMC_WANAP)
893 cmd->advertising |= ADVERTISED_Pause;
894 cmd->autoneg = AUTONEG_ENABLE;
895
896 ethtool_cmd_speed_set(cmd,
897 (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
898 cmd->duplex = (ctrl & WMC_WDS) ?
899 DUPLEX_FULL : DUPLEX_HALF;
900 } else {
901 /* auto-negotiation is disabled */
902 cmd->autoneg = AUTONEG_DISABLE;
903
904 ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
905 SPEED_100 : SPEED_10));
906 cmd->duplex = (ctrl & WMC_WANFF) ?
907 DUPLEX_FULL : DUPLEX_HALF;
908 }
909
910 return 0;
911 }
912
913 /**
914 * ks8695_wan_set_settings - Set device-specific settings.
915 * @ndev: The network device to configure
916 * @cmd: The settings to configure
917 */
918 static int
919 ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
920 {
921 struct ks8695_priv *ksp = netdev_priv(ndev);
922 u32 ctrl;
923
924 if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
925 return -EINVAL;
926 if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
927 return -EINVAL;
928 if (cmd->port != PORT_MII)
929 return -EINVAL;
930 if (cmd->transceiver != XCVR_INTERNAL)
931 return -EINVAL;
932 if ((cmd->autoneg != AUTONEG_DISABLE) &&
933 (cmd->autoneg != AUTONEG_ENABLE))
934 return -EINVAL;
935
936 if (cmd->autoneg == AUTONEG_ENABLE) {
937 if ((cmd->advertising & (ADVERTISED_10baseT_Half |
938 ADVERTISED_10baseT_Full |
939 ADVERTISED_100baseT_Half |
940 ADVERTISED_100baseT_Full)) == 0)
941 return -EINVAL;
942
943 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
944
945 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
946 WMC_WANA10F | WMC_WANA10H);
947 if (cmd->advertising & ADVERTISED_100baseT_Full)
948 ctrl |= WMC_WANA100F;
949 if (cmd->advertising & ADVERTISED_100baseT_Half)
950 ctrl |= WMC_WANA100H;
951 if (cmd->advertising & ADVERTISED_10baseT_Full)
952 ctrl |= WMC_WANA10F;
953 if (cmd->advertising & ADVERTISED_10baseT_Half)
954 ctrl |= WMC_WANA10H;
955
956 /* force a re-negotiation */
957 ctrl |= WMC_WANR;
958 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
959 } else {
960 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
961
962 /* disable auto-negotiation */
963 ctrl |= WMC_WAND;
964 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
965
966 if (cmd->speed == SPEED_100)
967 ctrl |= WMC_WANF100;
968 if (cmd->duplex == DUPLEX_FULL)
969 ctrl |= WMC_WANFF;
970
971 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
972 }
973
974 return 0;
975 }
976
977 /**
978 * ks8695_wan_nwayreset - Restart the autonegotiation on the port.
979  * @ndev: The network device to restart autonegotiation on
980 */
981 static int
982 ks8695_wan_nwayreset(struct net_device *ndev)
983 {
984 struct ks8695_priv *ksp = netdev_priv(ndev);
985 u32 ctrl;
986
987 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
988
989 if ((ctrl & WMC_WAND) == 0)
990 writel(ctrl | WMC_WANR,
991 ksp->phyiface_regs + KS8695_WMC);
992 else
993 /* auto-negotiation not enabled */
994 return -EINVAL;
995
996 return 0;
997 }
998
999 /**
1000 * ks8695_wan_get_pause - Retrieve network pause/flow-control advertising
1001 * @ndev: The device to retrieve settings from
1002 * @param: The structure to fill out with the information
1003 */
1004 static void
1005 ks8695_wan_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1006 {
1007 struct ks8695_priv *ksp = netdev_priv(ndev);
1008 u32 ctrl;
1009
1010 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1011
1012 /* advertise Pause */
1013 param->autoneg = (ctrl & WMC_WANAP);
1014
1015 /* current Rx Flow-control */
1016 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1017 param->rx_pause = (ctrl & DRXC_RFCE);
1018
1019 /* current Tx Flow-control */
1020 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1021 param->tx_pause = (ctrl & DTXC_TFCE);
1022 }
1023
1024 /**
1025 * ks8695_get_drvinfo - Retrieve driver information
1026 * @ndev: The network device to retrieve info about
1027 * @info: The info structure to fill out.
1028 */
1029 static void
1030 ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1031 {
1032 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1033 strlcpy(info->version, MODULEVERSION, sizeof(info->version));
1034 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
1035 sizeof(info->bus_info));
1036 }
1037
1038 static const struct ethtool_ops ks8695_ethtool_ops = {
1039 .get_msglevel = ks8695_get_msglevel,
1040 .set_msglevel = ks8695_set_msglevel,
1041 .get_drvinfo = ks8695_get_drvinfo,
1042 };
1043
1044 static const struct ethtool_ops ks8695_wan_ethtool_ops = {
1045 .get_msglevel = ks8695_get_msglevel,
1046 .set_msglevel = ks8695_set_msglevel,
1047 .get_settings = ks8695_wan_get_settings,
1048 .set_settings = ks8695_wan_set_settings,
1049 .nway_reset = ks8695_wan_nwayreset,
1050 .get_link = ethtool_op_get_link,
1051 .get_pauseparam = ks8695_wan_get_pause,
1052 .get_drvinfo = ks8695_get_drvinfo,
1053 };
1054
1055 /* Network device interface functions */
1056
1057 /**
1058 * ks8695_set_mac - Update MAC in net dev and HW
1059 * @ndev: The network device to update
1060 * @addr: The new MAC address to set
1061 */
1062 static int
1063 ks8695_set_mac(struct net_device *ndev, void *addr)
1064 {
1065 struct ks8695_priv *ksp = netdev_priv(ndev);
1066 struct sockaddr *address = addr;
1067
1068 if (!is_valid_ether_addr(address->sa_data))
1069 return -EADDRNOTAVAIL;
1070
1071 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1072
1073 ks8695_update_mac(ksp);
1074
1075 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1076 ndev->name, ndev->dev_addr);
1077
1078 return 0;
1079 }
1080
1081 /**
1082 * ks8695_set_multicast - Set up the multicast behaviour of the interface
1083 * @ndev: The net_device to configure
1084 *
1085 * This routine, called by the net layer, configures promiscuity
1086 * and multicast reception behaviour for the interface.
1087 */
1088 static void
1089 ks8695_set_multicast(struct net_device *ndev)
1090 {
1091 struct ks8695_priv *ksp = netdev_priv(ndev);
1092 u32 ctrl;
1093
1094 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1095
1096 if (ndev->flags & IFF_PROMISC) {
1097 /* enable promiscuous mode */
1098 ctrl |= DRXC_RA;
1099 } else if (ndev->flags & ~IFF_PROMISC) {
1100 /* disable promiscuous mode */
1101 ctrl &= ~DRXC_RA;
1102 }
1103
1104 if (ndev->flags & IFF_ALLMULTI) {
1105 /* enable all multicast mode */
1106 ctrl |= DRXC_RM;
1107 } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
1108 /* more specific multicast addresses than can be
1109 * handled in hardware
1110 */
1111 ctrl |= DRXC_RM;
1112 } else {
1113 /* enable specific multicasts */
1114 ctrl &= ~DRXC_RM;
1115 ks8695_init_partial_multicast(ksp, ndev);
1116 }
1117
1118 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
1119 }
1120
1121 /**
1122 * ks8695_timeout - Handle a network tx/rx timeout.
1123 * @ndev: The net_device which timed out.
1124 *
1125 * A network transaction timed out, reset the device.
1126 */
1127 static void
1128 ks8695_timeout(struct net_device *ndev)
1129 {
1130 struct ks8695_priv *ksp = netdev_priv(ndev);
1131
1132 netif_stop_queue(ndev);
1133 ks8695_shutdown(ksp);
1134
1135 ks8695_reset(ksp);
1136
1137 ks8695_update_mac(ksp);
1138
1139         /* We ignore the return value; since the device managed to
1140          * initialise before, it will probably be okay to do so again.
1141 */
1142 ks8695_init_net(ksp);
1143
1144 /* Reconfigure promiscuity etc */
1145 ks8695_set_multicast(ndev);
1146
1147 /* And start the TX queue once more */
1148 netif_start_queue(ndev);
1149 }
1150
1151 /**
1152 * ks8695_start_xmit - Start a packet transmission
1153 * @skb: The packet to transmit
1154 * @ndev: The network device to send the packet on
1155 *
1156 * This routine, called by the net layer, takes ownership of the
1157 * sk_buff and adds it to the TX ring. It then kicks the TX DMA
1158 * engine to ensure transmission begins.
1159 */
1160 static int
1161 ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1162 {
1163 struct ks8695_priv *ksp = netdev_priv(ndev);
1164 int buff_n;
1165 dma_addr_t dmap;
1166
1167 spin_lock_irq(&ksp->txq_lock);
1168
1169 if (ksp->tx_ring_used == MAX_TX_DESC) {
1170 /* Somehow we got entered when we have no room */
1171 spin_unlock_irq(&ksp->txq_lock);
1172 return NETDEV_TX_BUSY;
1173 }
1174
1175 buff_n = ksp->tx_ring_next_slot;
1176
1177 BUG_ON(ksp->tx_buffers[buff_n].skb);
1178
1179 dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
1180 if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
1181 /* Failed to DMA map this SKB, give it back for now */
1182 spin_unlock_irq(&ksp->txq_lock);
1183 dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
1184 "transmission, trying later\n", ndev->name);
1185 return NETDEV_TX_BUSY;
1186 }
1187
1188 ksp->tx_buffers[buff_n].dma_ptr = dmap;
1189 /* Mapped okay, store the buffer pointer and length for later */
1190 ksp->tx_buffers[buff_n].skb = skb;
1191 ksp->tx_buffers[buff_n].length = skb->len;
1192
1193 /* Fill out the TX descriptor */
1194 ksp->tx_ring[buff_n].data_ptr =
1195 cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
1196 ksp->tx_ring[buff_n].status =
1197 cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
1198 (skb->len & TDES_TBS));
1199
1200 wmb();
1201
1202 /* Hand it over to the hardware */
1203 ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);
1204
1205 if (++ksp->tx_ring_used == MAX_TX_DESC)
1206 netif_stop_queue(ndev);
1207
1208 /* Kick the TX DMA in case it decided to go IDLE */
1209 ks8695_writereg(ksp, KS8695_DTSC, 0);
1210
1211 /* And update the next ring slot */
1212 ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;
1213
1214 spin_unlock_irq(&ksp->txq_lock);
1215 return NETDEV_TX_OK;
1216 }
1217
1218 /**
1219 * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
1220 * @ndev: The net_device to stop
1221 *
1222 * This disables the TX queue and cleans up a KS8695 ethernet
1223 * device.
1224 */
1225 static int
1226 ks8695_stop(struct net_device *ndev)
1227 {
1228 struct ks8695_priv *ksp = netdev_priv(ndev);
1229
1230 netif_stop_queue(ndev);
1231 napi_disable(&ksp->napi);
1232
1233 ks8695_shutdown(ksp);
1234
1235 return 0;
1236 }
1237
1238 /**
1239 * ks8695_open - Open (bring up) a KS8695 ethernet interface
1240 * @ndev: The net_device to open
1241 *
1242 * This resets, configures the MAC, initialises the RX ring and
1243 * DMA engines and starts the TX queue for a KS8695 ethernet
1244 * device.
1245 */
1246 static int
1247 ks8695_open(struct net_device *ndev)
1248 {
1249 struct ks8695_priv *ksp = netdev_priv(ndev);
1250 int ret;
1251
1252 if (!is_valid_ether_addr(ndev->dev_addr))
1253 return -EADDRNOTAVAIL;
1254
1255 ks8695_reset(ksp);
1256
1257 ks8695_update_mac(ksp);
1258
1259 ret = ks8695_init_net(ksp);
1260 if (ret) {
1261 ks8695_shutdown(ksp);
1262 return ret;
1263 }
1264
1265 napi_enable(&ksp->napi);
1266 netif_start_queue(ndev);
1267
1268 return 0;
1269 }
1270
1271 /* Platform device driver */
1272
1273 /**
1274 * ks8695_init_switch - Init LAN switch to known good defaults.
1275 * @ksp: The device to initialise
1276 *
1277 * This initialises the LAN switch in the KS8695 to a known-good
1278 * set of defaults.
1279 */
1280 static void __devinit
1281 ks8695_init_switch(struct ks8695_priv *ksp)
1282 {
1283 u32 ctrl;
1284
1285 /* Default value for SEC0 according to datasheet */
1286 ctrl = 0x40819e00;
1287
1288 /* LED0 = Speed LED1 = Link/Activity */
1289 ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
1290 ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
1291
1292 /* Enable Switch */
1293 ctrl |= SEC0_ENABLE;
1294
1295 writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
1296
1297 /* Defaults for SEC1 */
1298 writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
1299 }
1300
1301 /**
1302 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
1303 * @ksp: The device to initialise
1304 *
1305 * This initialises a KS8695's WAN phy to sensible values for
1306 * autonegotiation etc.
1307 */
1308 static void __devinit
1309 ks8695_init_wan_phy(struct ks8695_priv *ksp)
1310 {
1311 u32 ctrl;
1312
1313 /* Support auto-negotiation */
1314 ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
1315 WMC_WANA10F | WMC_WANA10H);
1316
1317 /* LED0 = Activity , LED1 = Link */
1318 ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
1319
1320 /* Restart Auto-negotiation */
1321 ctrl |= WMC_WANR;
1322
1323 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1324
1325 writel(0, ksp->phyiface_regs + KS8695_WPPM);
1326 writel(0, ksp->phyiface_regs + KS8695_PPS);
1327 }
1328
1329 static const struct net_device_ops ks8695_netdev_ops = {
1330 .ndo_open = ks8695_open,
1331 .ndo_stop = ks8695_stop,
1332 .ndo_start_xmit = ks8695_start_xmit,
1333 .ndo_tx_timeout = ks8695_timeout,
1334 .ndo_set_mac_address = ks8695_set_mac,
1335 .ndo_validate_addr = eth_validate_addr,
1336 .ndo_set_rx_mode = ks8695_set_multicast,
1337 };
1338
1339 /**
1340 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
1341 * @pdev: The platform device to probe
1342 *
1343 * Initialise a KS8695 ethernet device from platform data.
1344 *
1345 * This driver requires at least one IORESOURCE_MEM for the
1346 * registers and two IORESOURCE_IRQ for the RX and TX IRQs
1347 * respectively. It can optionally take an additional
1348  * IORESOURCE_MEM for the switch or PHY in the case of the LAN or
1349  * WAN ports, and an IORESOURCE_IRQ for the link IRQ for the WAN
1350  * port (an illustrative resource layout is sketched after this function).
1351 */
1352 static int __devinit
1353 ks8695_probe(struct platform_device *pdev)
1354 {
1355 struct ks8695_priv *ksp;
1356 struct net_device *ndev;
1357 struct resource *regs_res, *phyiface_res;
1358 struct resource *rxirq_res, *txirq_res, *linkirq_res;
1359 int ret = 0;
1360 int buff_n;
1361 u32 machigh, maclow;
1362
1363 /* Initialise a net_device */
1364 ndev = alloc_etherdev(sizeof(struct ks8695_priv));
1365 if (!ndev) {
1366 dev_err(&pdev->dev, "could not allocate device.\n");
1367 return -ENOMEM;
1368 }
1369
1370 SET_NETDEV_DEV(ndev, &pdev->dev);
1371
1372 dev_dbg(&pdev->dev, "ks8695_probe() called\n");
1373
1374 /* Configure our private structure a little */
1375 ksp = netdev_priv(ndev);
1376
1377 ksp->dev = &pdev->dev;
1378 ksp->ndev = ndev;
1379 ksp->msg_enable = NETIF_MSG_LINK;
1380
1381 /* Retrieve resources */
1382 regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1383 phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1384
1385 rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1386 txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
1387 linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
1388
1389 if (!(regs_res && rxirq_res && txirq_res)) {
1390 dev_err(ksp->dev, "insufficient resources\n");
1391 ret = -ENOENT;
1392 goto failure;
1393 }
1394
1395 ksp->regs_req = request_mem_region(regs_res->start,
1396 resource_size(regs_res),
1397 pdev->name);
1398
1399 if (!ksp->regs_req) {
1400 dev_err(ksp->dev, "cannot claim register space\n");
1401 ret = -EIO;
1402 goto failure;
1403 }
1404
1405 ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));
1406
1407 if (!ksp->io_regs) {
1408 dev_err(ksp->dev, "failed to ioremap registers\n");
1409 ret = -EINVAL;
1410 goto failure;
1411 }
1412
1413 if (phyiface_res) {
1414 ksp->phyiface_req =
1415 request_mem_region(phyiface_res->start,
1416 resource_size(phyiface_res),
1417 phyiface_res->name);
1418
1419 if (!ksp->phyiface_req) {
1420 dev_err(ksp->dev,
1421 "cannot claim switch register space\n");
1422 ret = -EIO;
1423 goto failure;
1424 }
1425
1426 ksp->phyiface_regs = ioremap(phyiface_res->start,
1427 resource_size(phyiface_res));
1428
1429 if (!ksp->phyiface_regs) {
1430 dev_err(ksp->dev,
1431 "failed to ioremap switch registers\n");
1432 ret = -EINVAL;
1433 goto failure;
1434 }
1435 }
1436
1437 ksp->rx_irq = rxirq_res->start;
1438 ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
1439 ksp->tx_irq = txirq_res->start;
1440 ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
1441 ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
1442 ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
1443 linkirq_res->name : "Ethernet Link";
1444
1445 /* driver system setup */
1446 ndev->netdev_ops = &ks8695_netdev_ops;
1447 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1448
1449 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1450
1451 /* Retrieve the default MAC addr from the chip. */
1452 /* The bootloader should have left it in there for us. */
1453
1454 machigh = ks8695_readreg(ksp, KS8695_MAH);
1455 maclow = ks8695_readreg(ksp, KS8695_MAL);
1456
1457 ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
1458 ndev->dev_addr[1] = machigh & 0xFF;
1459 ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
1460 ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
1461 ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
1462 ndev->dev_addr[5] = maclow & 0xFF;
1463
1464 if (!is_valid_ether_addr(ndev->dev_addr))
1465 dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
1466 "set using ifconfig\n", ndev->name);
1467
1468 /* In order to be efficient memory-wise, we allocate both
1469 * rings in one go.
1470 */
1471 ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
1472 &ksp->ring_base_dma, GFP_KERNEL);
1473 if (!ksp->ring_base) {
1474 ret = -ENOMEM;
1475 goto failure;
1476 }
1477
1478 /* Specify the TX DMA ring buffer */
1479 ksp->tx_ring = ksp->ring_base;
1480 ksp->tx_ring_dma = ksp->ring_base_dma;
1481
1482 /* And initialise the queue's lock */
1483 spin_lock_init(&ksp->txq_lock);
1484 spin_lock_init(&ksp->rx_lock);
1485
1486 /* Specify the RX DMA ring buffer */
1487 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
1488 ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;
1489
1490 /* Zero the descriptor rings */
1491 memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
1492 memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);
1493
1494 /* Build the rings */
1495 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
1496 ksp->tx_ring[buff_n].next_desc =
1497 cpu_to_le32(ksp->tx_ring_dma +
1498 (sizeof(struct tx_ring_desc) *
1499 ((buff_n + 1) & MAX_TX_DESC_MASK)));
1500 }
1501
1502 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
1503 ksp->rx_ring[buff_n].next_desc =
1504 cpu_to_le32(ksp->rx_ring_dma +
1505 (sizeof(struct rx_ring_desc) *
1506 ((buff_n + 1) & MAX_RX_DESC_MASK)));
1507 }
1508
1509 /* Initialise the port (physically) */
1510 if (ksp->phyiface_regs && ksp->link_irq == -1) {
1511 ks8695_init_switch(ksp);
1512 ksp->dtype = KS8695_DTYPE_LAN;
1513 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1514 } else if (ksp->phyiface_regs && ksp->link_irq != -1) {
1515 ks8695_init_wan_phy(ksp);
1516 ksp->dtype = KS8695_DTYPE_WAN;
1517 SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
1518 } else {
1519 /* No initialisation since HPNA does not have a PHY */
1520 ksp->dtype = KS8695_DTYPE_HPNA;
1521 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1522 }
1523
1524 /* And bring up the net_device with the net core */
1525 platform_set_drvdata(pdev, ndev);
1526 ret = register_netdev(ndev);
1527
1528 if (ret == 0) {
1529 dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
1530 ks8695_port_type(ksp), ndev->dev_addr);
1531 } else {
1532 /* Report the failure to register the net_device */
1533 dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
1534 goto failure;
1535 }
1536
1537 /* All is well */
1538 return 0;
1539
1540 /* Error exit path */
1541 failure:
1542 ks8695_release_device(ksp);
1543 free_netdev(ndev);
1544
1545 return ret;
1546 }
1547
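/*
 * Hedged illustration only -- not taken from the original driver or from
 * the KS8695 board support code; every address and IRQ number below is a
 * made-up placeholder.  A board file instantiating one of these ports would
 * lay out its resources so that the platform_get_resource() calls in
 * ks8695_probe() find IORESOURCE_MEM index 0 (MAC registers), optional
 * IORESOURCE_MEM index 1 (phy/switch registers), and IORESOURCE_IRQ
 * indices 0, 1 and (optionally) 2 for the RX, TX and link interrupts:
 */
static struct resource ks8695_example_wan_resources[] = {
	{
		.start	= 0x10000000,	/* hypothetical MAC register base */
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 0x10001000,	/* hypothetical WAN PHY registers */
		.end	= 0x10001fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "WAN RX",
		.start	= 29,		/* hypothetical RX IRQ */
		.end	= 29,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= "WAN TX",
		.start	= 30,		/* hypothetical TX IRQ */
		.end	= 30,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= "WAN Link",
		.start	= 31,		/* hypothetical link-change IRQ */
		.end	= 31,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device ks8695_example_wan_device = {
	.name		= MODULENAME,	/* must match the platform driver */
	.id		= 0,
	.resource	= ks8695_example_wan_resources,
	.num_resources	= ARRAY_SIZE(ks8695_example_wan_resources),
};
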
1548 /**
1549 * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
1550 * @pdev: The device to suspend
1551 * @state: The suspend state
1552 *
1553 * This routine detaches and shuts down a KS8695 ethernet device.
1554 */
1555 static int
1556 ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1557 {
1558 struct net_device *ndev = platform_get_drvdata(pdev);
1559 struct ks8695_priv *ksp = netdev_priv(ndev);
1560
1561 ksp->in_suspend = 1;
1562
1563 if (netif_running(ndev)) {
1564 netif_device_detach(ndev);
1565 ks8695_shutdown(ksp);
1566 }
1567
1568 return 0;
1569 }
1570
1571 /**
1572 * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
1573 * @pdev: The device to resume
1574 *
1575 * This routine re-initialises and re-attaches a KS8695 ethernet
1576 * device.
1577 */
1578 static int
1579 ks8695_drv_resume(struct platform_device *pdev)
1580 {
1581 struct net_device *ndev = platform_get_drvdata(pdev);
1582 struct ks8695_priv *ksp = netdev_priv(ndev);
1583
1584 if (netif_running(ndev)) {
1585 ks8695_reset(ksp);
1586 ks8695_init_net(ksp);
1587 ks8695_set_multicast(ndev);
1588 netif_device_attach(ndev);
1589 }
1590
1591 ksp->in_suspend = 0;
1592
1593 return 0;
1594 }
1595
1596 /**
1597 * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
1598 * @pdev: The platform device to remove
1599 *
1600 * This unregisters and releases a KS8695 ethernet device.
1601 */
1602 static int __devexit
1603 ks8695_drv_remove(struct platform_device *pdev)
1604 {
1605 struct net_device *ndev = platform_get_drvdata(pdev);
1606 struct ks8695_priv *ksp = netdev_priv(ndev);
1607
1608 platform_set_drvdata(pdev, NULL);
1609 netif_napi_del(&ksp->napi);
1610
1611 unregister_netdev(ndev);
1612 ks8695_release_device(ksp);
1613 free_netdev(ndev);
1614
1615 dev_dbg(&pdev->dev, "released and freed device\n");
1616 return 0;
1617 }
1618
1619 static struct platform_driver ks8695_driver = {
1620 .driver = {
1621 .name = MODULENAME,
1622 .owner = THIS_MODULE,
1623 },
1624 .probe = ks8695_probe,
1625 .remove = __devexit_p(ks8695_drv_remove),
1626 .suspend = ks8695_drv_suspend,
1627 .resume = ks8695_drv_resume,
1628 };
1629
1630 /* Module interface */
1631
1632 static int __init
1633 ks8695_init(void)
1634 {
1635 printk(KERN_INFO "%s Ethernet driver, V%s\n",
1636 MODULENAME, MODULEVERSION);
1637
1638 return platform_driver_register(&ks8695_driver);
1639 }
1640
1641 static void __exit
1642 ks8695_cleanup(void)
1643 {
1644 platform_driver_unregister(&ks8695_driver);
1645 }
1646
1647 module_init(ks8695_init);
1648 module_exit(ks8695_cleanup);
1649
1650 MODULE_AUTHOR("Simtec Electronics");
1651 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1652 MODULE_LICENSE("GPL");
1653 MODULE_ALIAS("platform:" MODULENAME);
1654
1655 module_param(watchdog, int, 0400);
1656 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");