1 /*
2 * Ethernet driver for the WIZnet W5100 chip.
3 *
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/platform_device.h>
15 #include <linux/platform_data/wiznet.h>
16 #include <linux/ethtool.h>
17 #include <linux/skbuff.h>
18 #include <linux/types.h>
19 #include <linux/errno.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/io.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/irq.h>
27 #include <linux/gpio.h>
28
29 #include "w5100.h"
30
31 #define DRV_NAME "w5100"
32 #define DRV_VERSION "2012-04-04"
33
34 MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
35 MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
36 MODULE_ALIAS("platform:"DRV_NAME);
37 MODULE_LICENSE("GPL");
38
39 /*
40 * W5100/W5200/W5500 common registers
41 */
42 #define W5100_COMMON_REGS 0x0000
43 #define W5100_MR 0x0000 /* Mode Register */
44 #define MR_RST 0x80 /* S/W reset */
45 #define MR_PB 0x10 /* Ping block */
46 #define MR_AI 0x02 /* Address Auto-Increment */
47 #define MR_IND 0x01 /* Indirect mode */
48 #define W5100_SHAR 0x0009 /* Source MAC address */
49 #define W5100_IR 0x0015 /* Interrupt Register */
50 #define W5100_COMMON_REGS_LEN 0x0040
51
52 #define W5100_Sn_MR 0x0000 /* Sn Mode Register */
53 #define W5100_Sn_CR 0x0001 /* Sn Command Register */
54 #define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */
55 #define W5100_Sn_SR 0x0003 /* Sn Status Register */
56 #define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */
57 #define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */
58 #define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */
59 #define W5100_Sn_RX_RSR 0x0026 /* Sn Received data size */
60 #define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */
61
62 #define S0_REGS(priv) ((priv)->s0_regs)
63
64 #define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR)
65 #define S0_MR_MACRAW 0x04 /* MAC RAW mode */
66 #define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */
67 #define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */
68 #define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR)
69 #define S0_CR_OPEN 0x01 /* OPEN command */
70 #define S0_CR_CLOSE 0x10 /* CLOSE command */
71 #define S0_CR_SEND 0x20 /* SEND command */
72 #define S0_CR_RECV 0x40 /* RECV command */
73 #define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR)
74 #define S0_IR_SENDOK 0x10 /* complete sending */
75 #define S0_IR_RECV 0x04 /* receiving data */
76 #define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR)
77 #define S0_SR_MACRAW 0x42 /* MAC RAW mode */
78 #define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR)
79 #define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD)
80 #define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR)
81 #define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR)
82 #define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD)
83
84 #define W5100_S0_REGS_LEN 0x0040
85
86 /*
87 * W5100 and W5200 common registers
88 */
89 #define W5100_IMR 0x0016 /* Interrupt Mask Register */
90 #define IR_S0 0x01 /* S0 interrupt */
91 #define W5100_RTR 0x0017 /* Retry Time-value Register */
92 #define RTR_DEFAULT 2000 /* 0x07d0: 2000 * 100us = 200ms */
93
94 /*
95 * W5100 specific register and memory
96 */
97 #define W5100_RMSR 0x001a /* Receive Memory Size */
98 #define W5100_TMSR 0x001b /* Transmit Memory Size */
99
100 #define W5100_S0_REGS 0x0400
101
102 #define W5100_TX_MEM_START 0x4000
103 #define W5100_TX_MEM_SIZE 0x2000
104 #define W5100_RX_MEM_START 0x6000
105 #define W5100_RX_MEM_SIZE 0x2000
106
107 /*
108 * W5200 specific register and memory
109 */
110 #define W5200_S0_REGS 0x4000
111
112 #define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
113 #define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
114
115 #define W5200_TX_MEM_START 0x8000
116 #define W5200_TX_MEM_SIZE 0x4000
117 #define W5200_RX_MEM_START 0xc000
118 #define W5200_RX_MEM_SIZE 0x4000
119
120 /*
121 * W5500 specific register and memory
122 *
123 * W5500 registers and memory are organized into multiple blocks. Each one
124 * is selected by a 16-bit offset address and 5 block select bits, so we
125 * encode both into a single 32-bit address (lower 16 bits: offset address,
126 * upper 16 bits: block select bits).
127 */
128 #define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */
129 #define W5500_RTR 0x0019 /* Retry Time-value Register */
130
131 #define W5500_S0_REGS 0x10000
132
133 #define W5500_Sn_RXMEM_SIZE(n) \
134 (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
135 #define W5500_Sn_TXMEM_SIZE(n) \
136 (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
137
138 #define W5500_TX_MEM_START 0x20000
139 #define W5500_TX_MEM_SIZE 0x04000
140 #define W5500_RX_MEM_START 0x30000
141 #define W5500_RX_MEM_SIZE 0x04000
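
/*
 * For illustration, decoding the encoded addresses above (block select in
 * the upper 16 bits, offset in the lower 16 bits):
 *
 *   W5500_S0_REGS          = 0x10000  -> block 0x01 (socket 0 registers), offset 0x0000
 *   W5500_TX_MEM_START     = 0x20000  -> block 0x02 (socket 0 TX buffer), offset 0x0000
 *   W5500_RX_MEM_START     = 0x30000  -> block 0x03 (socket 0 RX buffer), offset 0x0000
 *   W5500_Sn_RXMEM_SIZE(0) = 0x1001e  -> block 0x01, offset 0x001e
 */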
142
143 /*
144 * Device driver private data structure
145 */
146
147 struct w5100_priv {
148 const struct w5100_ops *ops;
149
150 /* Socket 0 register offset address */
151 u32 s0_regs;
152 /* Socket 0 TX buffer offset address and size */
153 u32 s0_tx_buf;
154 u16 s0_tx_buf_size;
155 /* Socket 0 RX buffer offset address and size */
156 u32 s0_rx_buf;
157 u16 s0_rx_buf_size;
158
159 int irq;
160 int link_irq;
161 int link_gpio;
162
163 struct napi_struct napi;
164 struct net_device *ndev;
165 bool promisc;
166 u32 msg_enable;
167
168 struct workqueue_struct *xfer_wq;
169 struct work_struct rx_work;
170 struct sk_buff *tx_skb;
171 struct work_struct tx_work;
172 struct work_struct setrx_work;
173 struct work_struct restart_work;
174 };
175
176 /************************************************************************
177 *
178 * Lowlevel I/O functions
179 *
180 ***********************************************************************/
181
182 struct w5100_mmio_priv {
183 void __iomem *base;
184 /* Serialize access in indirect address mode */
185 spinlock_t reg_lock;
186 };
187
188 static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
189 {
190 return w5100_ops_priv(dev);
191 }
192
193 static inline void __iomem *w5100_mmio(struct net_device *ndev)
194 {
195 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
196
197 return mmio_priv->base;
198 }
199
200 /*
201 * In direct address mode the host system accesses the W5100 registers
202 * directly once they are mapped into memory-mapped I/O space.
203 *
204 * 0x8000 bytes of address space are required.
205 */
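
/*
 * A minimal sketch of the resulting mapping, assuming CONFIG_WIZNET_BUS_SHIFT
 * is 0 (byte-wide bus): register 0x0015 (W5100_IR) is read at MMIO offset
 * 0x0015.  With a hypothetical shift of 1 (e.g. registers wired on every
 * other byte of a 16-bit bus) the same register would be read at offset
 * 0x002a:
 *
 *	ioread8(w5100_mmio(ndev) + (0x0015 << 1));
 */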
206 static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
207 {
208 return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
209 }
210
211 static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
212 u8 data)
213 {
214 iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
215
216 return 0;
217 }
218
219 static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
220 {
221 __w5100_write_direct(ndev, addr, data);
222 mmiowb();
223
224 return 0;
225 }
226
227 static int w5100_read16_direct(struct net_device *ndev, u32 addr)
228 {
229 u16 data;
230 data = w5100_read_direct(ndev, addr) << 8;
231 data |= w5100_read_direct(ndev, addr + 1);
232 return data;
233 }
234
235 static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
236 {
237 __w5100_write_direct(ndev, addr, data >> 8);
238 __w5100_write_direct(ndev, addr + 1, data);
239 mmiowb();
240
241 return 0;
242 }
243
244 static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
245 int len)
246 {
247 int i;
248
249 for (i = 0; i < len; i++, addr++)
250 *buf++ = w5100_read_direct(ndev, addr);
251
252 return 0;
253 }
254
255 static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
256 const u8 *buf, int len)
257 {
258 int i;
259
260 for (i = 0; i < len; i++, addr++)
261 __w5100_write_direct(ndev, addr, *buf++);
262
263 mmiowb();
264
265 return 0;
266 }
267
268 static int w5100_mmio_init(struct net_device *ndev)
269 {
270 struct platform_device *pdev = to_platform_device(ndev->dev.parent);
271 struct w5100_priv *priv = netdev_priv(ndev);
272 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
273 struct resource *mem;
274
275 spin_lock_init(&mmio_priv->reg_lock);
276
277 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
278 mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
279 if (IS_ERR(mmio_priv->base))
280 return PTR_ERR(mmio_priv->base);
281
282 netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
283
284 return 0;
285 }
286
287 static const struct w5100_ops w5100_mmio_direct_ops = {
288 .chip_id = W5100,
289 .read = w5100_read_direct,
290 .write = w5100_write_direct,
291 .read16 = w5100_read16_direct,
292 .write16 = w5100_write16_direct,
293 .readbulk = w5100_readbulk_direct,
294 .writebulk = w5100_writebulk_direct,
295 .init = w5100_mmio_init,
296 };
297
298 /*
299 * In indirect address mode the host system accesses registers indirectly
300 * through the Indirect Mode Address Register (IDM_AR) and the Indirect Mode
301 * Data Register (IDM_DR), which are directly mapped into memory-mapped I/O
302 * space. The Mode Register (MR) remains directly accessible.
303 *
304 * Only 0x04 bytes of address space are required.
305 */
306 #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
307 #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
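
/*
 * Sketch of an indirect access as implemented below: the 16-bit register
 * address is written to IDM_AR (two byte writes at offsets 0x01 and 0x02),
 * then data is transferred through IDM_DR.  The bulk helpers rely on the
 * address auto-increment enabled via MR_AI in w5100_reset_indirect(), so
 * they set IDM_AR once and then stream bytes through IDM_DR.
 */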
308
309 static int w5100_read_indirect(struct net_device *ndev, u32 addr)
310 {
311 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
312 unsigned long flags;
313 u8 data;
314
315 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
316 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
317 data = w5100_read_direct(ndev, W5100_IDM_DR);
318 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
319
320 return data;
321 }
322
323 static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
324 {
325 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
326 unsigned long flags;
327
328 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
329 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
330 w5100_write_direct(ndev, W5100_IDM_DR, data);
331 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
332
333 return 0;
334 }
335
336 static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
337 {
338 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
339 unsigned long flags;
340 u16 data;
341
342 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
343 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
344 data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
345 data |= w5100_read_direct(ndev, W5100_IDM_DR);
346 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
347
348 return data;
349 }
350
351 static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
352 {
353 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
354 unsigned long flags;
355
356 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
357 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
358 __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
359 w5100_write_direct(ndev, W5100_IDM_DR, data);
360 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
361
362 return 0;
363 }
364
365 static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
366 int len)
367 {
368 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
369 unsigned long flags;
370 int i;
371
372 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
373 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
374
375 for (i = 0; i < len; i++)
376 *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
377
378 mmiowb();
379 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
380
381 return 0;
382 }
383
384 static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
385 const u8 *buf, int len)
386 {
387 struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
388 unsigned long flags;
389 int i;
390
391 spin_lock_irqsave(&mmio_priv->reg_lock, flags);
392 w5100_write16_direct(ndev, W5100_IDM_AR, addr);
393
394 for (i = 0; i < len; i++)
395 __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
396
397 mmiowb();
398 spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
399
400 return 0;
401 }
402
403 static int w5100_reset_indirect(struct net_device *ndev)
404 {
405 w5100_write_direct(ndev, W5100_MR, MR_RST);
406 mdelay(5);
407 w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
408
409 return 0;
410 }
411
412 static const struct w5100_ops w5100_mmio_indirect_ops = {
413 .chip_id = W5100,
414 .read = w5100_read_indirect,
415 .write = w5100_write_indirect,
416 .read16 = w5100_read16_indirect,
417 .write16 = w5100_write16_indirect,
418 .readbulk = w5100_readbulk_indirect,
419 .writebulk = w5100_writebulk_indirect,
420 .init = w5100_mmio_init,
421 .reset = w5100_reset_indirect,
422 };
423
424 #if defined(CONFIG_WIZNET_BUS_DIRECT)
425
426 static int w5100_read(struct w5100_priv *priv, u32 addr)
427 {
428 return w5100_read_direct(priv->ndev, addr);
429 }
430
431 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
432 {
433 return w5100_write_direct(priv->ndev, addr, data);
434 }
435
436 static int w5100_read16(struct w5100_priv *priv, u32 addr)
437 {
438 return w5100_read16_direct(priv->ndev, addr);
439 }
440
441 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
442 {
443 return w5100_write16_direct(priv->ndev, addr, data);
444 }
445
446 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
447 {
448 return w5100_readbulk_direct(priv->ndev, addr, buf, len);
449 }
450
451 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
452 int len)
453 {
454 return w5100_writebulk_direct(priv->ndev, addr, buf, len);
455 }
456
457 #elif defined(CONFIG_WIZNET_BUS_INDIRECT)
458
459 static int w5100_read(struct w5100_priv *priv, u32 addr)
460 {
461 return w5100_read_indirect(priv->ndev, addr);
462 }
463
464 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
465 {
466 return w5100_write_indirect(priv->ndev, addr, data);
467 }
468
469 static int w5100_read16(struct w5100_priv *priv, u32 addr)
470 {
471 return w5100_read16_indirect(priv->ndev, addr);
472 }
473
474 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
475 {
476 return w5100_write16_indirect(priv->ndev, addr, data);
477 }
478
479 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
480 {
481 return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
482 }
483
484 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
485 int len)
486 {
487 return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
488 }
489
490 #else /* CONFIG_WIZNET_BUS_ANY */
491
492 static int w5100_read(struct w5100_priv *priv, u32 addr)
493 {
494 return priv->ops->read(priv->ndev, addr);
495 }
496
497 static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
498 {
499 return priv->ops->write(priv->ndev, addr, data);
500 }
501
502 static int w5100_read16(struct w5100_priv *priv, u32 addr)
503 {
504 return priv->ops->read16(priv->ndev, addr);
505 }
506
507 static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
508 {
509 return priv->ops->write16(priv->ndev, addr, data);
510 }
511
512 static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
513 {
514 return priv->ops->readbulk(priv->ndev, addr, buf, len);
515 }
516
517 static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
518 int len)
519 {
520 return priv->ops->writebulk(priv->ndev, addr, buf, len);
521 }
522
523 #endif
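
/*
 * The accessors above are specialized at build time: when Kconfig pins the
 * bus mode (CONFIG_WIZNET_BUS_DIRECT or CONFIG_WIZNET_BUS_INDIRECT) the
 * direct/indirect helpers are called without going through priv->ops;
 * CONFIG_WIZNET_BUS_ANY keeps the generic per-device dispatch.
 */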
524
525 static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
526 {
527 u32 addr;
528 int remain = 0;
529 int ret;
530 const u32 mem_start = priv->s0_rx_buf;
531 const u16 mem_size = priv->s0_rx_buf_size;
532
533 offset %= mem_size;
534 addr = mem_start + offset;
535
536 if (offset + len > mem_size) {
537 remain = (offset + len) % mem_size;
538 len = mem_size - offset;
539 }
540
541 ret = w5100_readbulk(priv, addr, buf, len);
542 if (ret || !remain)
543 return ret;
544
545 return w5100_readbulk(priv, mem_start, buf + len, remain);
546 }
547
548 static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
549 int len)
550 {
551 u32 addr;
552 int ret;
553 int remain = 0;
554 const u32 mem_start = priv->s0_tx_buf;
555 const u16 mem_size = priv->s0_tx_buf_size;
556
557 offset %= mem_size;
558 addr = mem_start + offset;
559
560 if (offset + len > mem_size) {
561 remain = (offset + len) % mem_size;
562 len = mem_size - offset;
563 }
564
565 ret = w5100_writebulk(priv, addr, buf, len);
566 if (ret || !remain)
567 return ret;
568
569 return w5100_writebulk(priv, mem_start, buf + len, remain);
570 }
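
/*
 * Both helpers above treat the socket buffer as a ring.  A worked example
 * with made-up numbers: mem_size = 0x2000, offset = 0x1f00, len = 0x300.
 * Since offset + len (0x2200) exceeds mem_size, remain becomes
 * 0x2200 % 0x2000 = 0x200 and len is clipped to 0x2000 - 0x1f00 = 0x100;
 * the first bulk transfer covers mem_start + 0x1f00 up to the end of the
 * buffer and the second transfers the remaining 0x200 bytes from mem_start.
 */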
571
572 static int w5100_reset(struct w5100_priv *priv)
573 {
574 if (priv->ops->reset)
575 return priv->ops->reset(priv->ndev);
576
577 w5100_write(priv, W5100_MR, MR_RST);
578 mdelay(5);
579 w5100_write(priv, W5100_MR, MR_PB);
580
581 return 0;
582 }
583
584 static int w5100_command(struct w5100_priv *priv, u16 cmd)
585 {
586 unsigned long timeout;
587
588 w5100_write(priv, W5100_S0_CR(priv), cmd);
589
590 timeout = jiffies + msecs_to_jiffies(100);
591
592 while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
593 if (time_after(jiffies, timeout))
594 return -EIO;
595 cpu_relax();
596 }
597
598 return 0;
599 }
600
601 static void w5100_write_macaddr(struct w5100_priv *priv)
602 {
603 struct net_device *ndev = priv->ndev;
604
605 w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
606 }
607
608 static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
609 {
610 u32 imr;
611
612 if (priv->ops->chip_id == W5500)
613 imr = W5500_SIMR;
614 else
615 imr = W5100_IMR;
616
617 w5100_write(priv, imr, mask);
618 }
619
620 static void w5100_enable_intr(struct w5100_priv *priv)
621 {
622 w5100_socket_intr_mask(priv, IR_S0);
623 }
624
625 static void w5100_disable_intr(struct w5100_priv *priv)
626 {
627 w5100_socket_intr_mask(priv, 0);
628 }
629
630 static void w5100_memory_configure(struct w5100_priv *priv)
631 {
632 /* Configure 16K of internal memory
633 * as 8K RX buffer and 8K TX buffer
634 */
635 w5100_write(priv, W5100_RMSR, 0x03);
636 w5100_write(priv, W5100_TMSR, 0x03);
637 }
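
/*
 * 0x03 appears to hand the whole 8KB bank to socket 0: RMSR/TMSR hold two
 * size bits per socket and 0b11 in the socket 0 field selects the 8KB
 * granule (interpretation based on the W5100 datasheet; only the resulting
 * 8KB RX / 8KB TX split matters here).
 */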
638
639 static void w5200_memory_configure(struct w5100_priv *priv)
640 {
641 int i;
642
643 /* Configure internal RX memory as 16K RX buffer and
644 * internal TX memory as 16K TX buffer
645 */
646 w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
647 w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
648
649 for (i = 1; i < 8; i++) {
650 w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
651 w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
652 }
653 }
654
655 static void w5500_memory_configure(struct w5100_priv *priv)
656 {
657 int i;
658
659 /* Configure internal RX memory as 16K RX buffer and
660 * internal TX memory as 16K TX buffer
661 */
662 w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
663 w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
664
665 for (i = 1; i < 8; i++) {
666 w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
667 w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
668 }
669 }
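
/*
 * For both W5200 and W5500 the per-socket memory size registers written
 * above take the buffer size in KB, so 0x10 assigns the full 16KB to
 * socket 0 and 0 removes the buffers of the unused sockets (an assumption
 * consistent with the comments above; only socket 0 is used in MACRAW
 * mode).
 */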
670
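/*
 * Reading back RTR after the reset below doubles as a cheap presence check:
 * if the chip does not report the power-on default retry time, probing is
 * aborted with -ENODEV.
 */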
671 static int w5100_hw_reset(struct w5100_priv *priv)
672 {
673 u32 rtr;
674
675 w5100_reset(priv);
676
677 w5100_disable_intr(priv);
678 w5100_write_macaddr(priv);
679
680 switch (priv->ops->chip_id) {
681 case W5100:
682 w5100_memory_configure(priv);
683 rtr = W5100_RTR;
684 break;
685 case W5200:
686 w5200_memory_configure(priv);
687 rtr = W5100_RTR;
688 break;
689 case W5500:
690 w5500_memory_configure(priv);
691 rtr = W5500_RTR;
692 break;
693 default:
694 return -EINVAL;
695 }
696
697 if (w5100_read16(priv, rtr) != RTR_DEFAULT)
698 return -ENODEV;
699
700 return 0;
701 }
702
703 static void w5100_hw_start(struct w5100_priv *priv)
704 {
705 u8 mode = S0_MR_MACRAW;
706
707 if (!priv->promisc) {
708 if (priv->ops->chip_id == W5500)
709 mode |= W5500_S0_MR_MF;
710 else
711 mode |= S0_MR_MF;
712 }
713
714 w5100_write(priv, W5100_S0_MR(priv), mode);
715 w5100_command(priv, S0_CR_OPEN);
716 w5100_enable_intr(priv);
717 }
718
719 static void w5100_hw_close(struct w5100_priv *priv)
720 {
721 w5100_disable_intr(priv);
722 w5100_command(priv, S0_CR_CLOSE);
723 }
724
725 /***********************************************************************
726 *
727 * Device driver functions / callbacks
728 *
729 ***********************************************************************/
730
731 static void w5100_get_drvinfo(struct net_device *ndev,
732 struct ethtool_drvinfo *info)
733 {
734 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
735 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
736 strlcpy(info->bus_info, dev_name(ndev->dev.parent),
737 sizeof(info->bus_info));
738 }
739
740 static u32 w5100_get_link(struct net_device *ndev)
741 {
742 struct w5100_priv *priv = netdev_priv(ndev);
743
744 if (gpio_is_valid(priv->link_gpio))
745 return !!gpio_get_value(priv->link_gpio);
746
747 return 1;
748 }
749
750 static u32 w5100_get_msglevel(struct net_device *ndev)
751 {
752 struct w5100_priv *priv = netdev_priv(ndev);
753
754 return priv->msg_enable;
755 }
756
757 static void w5100_set_msglevel(struct net_device *ndev, u32 value)
758 {
759 struct w5100_priv *priv = netdev_priv(ndev);
760
761 priv->msg_enable = value;
762 }
763
764 static int w5100_get_regs_len(struct net_device *ndev)
765 {
766 return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
767 }
768
769 static void w5100_get_regs(struct net_device *ndev,
770 struct ethtool_regs *regs, void *buf)
771 {
772 struct w5100_priv *priv = netdev_priv(ndev);
773
774 regs->version = 1;
775 w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
776 buf += W5100_COMMON_REGS_LEN;
777 w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
778 }
779
780 static void w5100_restart(struct net_device *ndev)
781 {
782 struct w5100_priv *priv = netdev_priv(ndev);
783
784 netif_stop_queue(ndev);
785 w5100_hw_reset(priv);
786 w5100_hw_start(priv);
787 ndev->stats.tx_errors++;
788 netif_trans_update(ndev);
789 netif_wake_queue(ndev);
790 }
791
792 static void w5100_restart_work(struct work_struct *work)
793 {
794 struct w5100_priv *priv = container_of(work, struct w5100_priv,
795 restart_work);
796
797 w5100_restart(priv->ndev);
798 }
799
800 static void w5100_tx_timeout(struct net_device *ndev)
801 {
802 struct w5100_priv *priv = netdev_priv(ndev);
803
804 if (priv->ops->may_sleep)
805 schedule_work(&priv->restart_work);
806 else
807 w5100_restart(ndev);
808 }
809
810 static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
811 {
812 struct w5100_priv *priv = netdev_priv(ndev);
813 u16 offset;
814
815 offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
816 w5100_writebuf(priv, offset, skb->data, skb->len);
817 w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
818 ndev->stats.tx_bytes += skb->len;
819 ndev->stats.tx_packets++;
820 dev_kfree_skb(skb);
821
822 w5100_command(priv, S0_CR_SEND);
823 }
824
825 static void w5100_tx_work(struct work_struct *work)
826 {
827 struct w5100_priv *priv = container_of(work, struct w5100_priv,
828 tx_work);
829 struct sk_buff *skb = priv->tx_skb;
830
831 priv->tx_skb = NULL;
832
833 if (WARN_ON(!skb))
834 return;
835 w5100_tx_skb(priv->ndev, skb);
836 }
837
838 static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
839 {
840 struct w5100_priv *priv = netdev_priv(ndev);
841
842 netif_stop_queue(ndev);
843
844 if (priv->ops->may_sleep) {
845 WARN_ON(priv->tx_skb);
846 priv->tx_skb = skb;
847 queue_work(priv->xfer_wq, &priv->tx_work);
848 } else {
849 w5100_tx_skb(ndev, skb);
850 }
851
852 return NETDEV_TX_OK;
853 }
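
/*
 * Transmission is strictly one frame at a time: the queue is stopped for
 * every skb handed to ndo_start_xmit and only woken again from the
 * interrupt handler once S0_IR_SENDOK signals that the SEND command has
 * completed (or from w5100_restart() after a TX timeout).
 */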
854
855 static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
856 {
857 struct w5100_priv *priv = netdev_priv(ndev);
858 struct sk_buff *skb;
859 u16 rx_len;
860 u16 offset;
861 u8 header[2];
862 u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
863
864 if (rx_buf_len == 0)
865 return NULL;
866
867 offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
868 w5100_readbuf(priv, offset, header, 2);
869 rx_len = get_unaligned_be16(header) - 2;
870
871 skb = netdev_alloc_skb_ip_align(ndev, rx_len);
872 if (unlikely(!skb)) {
873 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
874 w5100_command(priv, S0_CR_RECV);
875 ndev->stats.rx_dropped++;
876 return NULL;
877 }
878
879 skb_put(skb, rx_len);
880 w5100_readbuf(priv, offset + 2, skb->data, rx_len);
881 w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
882 w5100_command(priv, S0_CR_RECV);
883 skb->protocol = eth_type_trans(skb, ndev);
884
885 ndev->stats.rx_packets++;
886 ndev->stats.rx_bytes += rx_len;
887
888 return skb;
889 }
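
/*
 * Each received frame in the socket RX buffer is preceded by a 2-byte
 * big-endian length field that includes the field itself, which is why 2
 * is subtracted from the value read above and why the payload copy starts
 * at offset + 2.
 */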
890
891 static void w5100_rx_work(struct work_struct *work)
892 {
893 struct w5100_priv *priv = container_of(work, struct w5100_priv,
894 rx_work);
895 struct sk_buff *skb;
896
897 while ((skb = w5100_rx_skb(priv->ndev)))
898 netif_rx_ni(skb);
899
900 w5100_enable_intr(priv);
901 }
902
903 static int w5100_napi_poll(struct napi_struct *napi, int budget)
904 {
905 struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
906 int rx_count;
907
908 for (rx_count = 0; rx_count < budget; rx_count++) {
909 struct sk_buff *skb = w5100_rx_skb(priv->ndev);
910
911 if (skb)
912 netif_receive_skb(skb);
913 else
914 break;
915 }
916
917 if (rx_count < budget) {
918 napi_complete(napi);
919 w5100_enable_intr(priv);
920 }
921
922 return rx_count;
923 }
924
925 static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
926 {
927 struct net_device *ndev = ndev_instance;
928 struct w5100_priv *priv = netdev_priv(ndev);
929
930 int ir = w5100_read(priv, W5100_S0_IR(priv));
931 if (!ir)
932 return IRQ_NONE;
933 w5100_write(priv, W5100_S0_IR(priv), ir);
934
935 if (ir & S0_IR_SENDOK) {
936 netif_dbg(priv, tx_done, ndev, "tx done\n");
937 netif_wake_queue(ndev);
938 }
939
940 if (ir & S0_IR_RECV) {
941 w5100_disable_intr(priv);
942
943 if (priv->ops->may_sleep)
944 queue_work(priv->xfer_wq, &priv->rx_work);
945 else if (napi_schedule_prep(&priv->napi))
946 __napi_schedule(&priv->napi);
947 }
948
949 return IRQ_HANDLED;
950 }
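
/*
 * Two receive paths hang off this handler: for memory-mapped buses
 * (ops->may_sleep is false) the NAPI poll loop drains the socket buffer,
 * while buses whose accessors may sleep (e.g. the SPI variants layered on
 * this core) defer the work to priv->xfer_wq and w5100_rx_work(), which
 * delivers frames with netif_rx_ni().  The socket interrupt stays masked
 * until the chosen path has drained the buffer and re-enables it.
 */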
951
952 static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
953 {
954 struct net_device *ndev = ndev_instance;
955 struct w5100_priv *priv = netdev_priv(ndev);
956
957 if (netif_running(ndev)) {
958 if (gpio_get_value(priv->link_gpio) != 0) {
959 netif_info(priv, link, ndev, "link is up\n");
960 netif_carrier_on(ndev);
961 } else {
962 netif_info(priv, link, ndev, "link is down\n");
963 netif_carrier_off(ndev);
964 }
965 }
966
967 return IRQ_HANDLED;
968 }
969
970 static void w5100_setrx_work(struct work_struct *work)
971 {
972 struct w5100_priv *priv = container_of(work, struct w5100_priv,
973 setrx_work);
974
975 w5100_hw_start(priv);
976 }
977
978 static void w5100_set_rx_mode(struct net_device *ndev)
979 {
980 struct w5100_priv *priv = netdev_priv(ndev);
981 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
982
983 if (priv->promisc != set_promisc) {
984 priv->promisc = set_promisc;
985
986 if (priv->ops->may_sleep)
987 schedule_work(&priv->setrx_work);
988 else
989 w5100_hw_start(priv);
990 }
991 }
992
993 static int w5100_set_macaddr(struct net_device *ndev, void *addr)
994 {
995 struct w5100_priv *priv = netdev_priv(ndev);
996 struct sockaddr *sock_addr = addr;
997
998 if (!is_valid_ether_addr(sock_addr->sa_data))
999 return -EADDRNOTAVAIL;
1000 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
1001 w5100_write_macaddr(priv);
1002 return 0;
1003 }
1004
1005 static int w5100_open(struct net_device *ndev)
1006 {
1007 struct w5100_priv *priv = netdev_priv(ndev);
1008
1009 netif_info(priv, ifup, ndev, "enabling\n");
1010 w5100_hw_start(priv);
1011 napi_enable(&priv->napi);
1012 netif_start_queue(ndev);
1013 if (!gpio_is_valid(priv->link_gpio) ||
1014 gpio_get_value(priv->link_gpio) != 0)
1015 netif_carrier_on(ndev);
1016 return 0;
1017 }
1018
1019 static int w5100_stop(struct net_device *ndev)
1020 {
1021 struct w5100_priv *priv = netdev_priv(ndev);
1022
1023 netif_info(priv, ifdown, ndev, "shutting down\n");
1024 w5100_hw_close(priv);
1025 netif_carrier_off(ndev);
1026 netif_stop_queue(ndev);
1027 napi_disable(&priv->napi);
1028 return 0;
1029 }
1030
1031 static const struct ethtool_ops w5100_ethtool_ops = {
1032 .get_drvinfo = w5100_get_drvinfo,
1033 .get_msglevel = w5100_get_msglevel,
1034 .set_msglevel = w5100_set_msglevel,
1035 .get_link = w5100_get_link,
1036 .get_regs_len = w5100_get_regs_len,
1037 .get_regs = w5100_get_regs,
1038 };
1039
1040 static const struct net_device_ops w5100_netdev_ops = {
1041 .ndo_open = w5100_open,
1042 .ndo_stop = w5100_stop,
1043 .ndo_start_xmit = w5100_start_tx,
1044 .ndo_tx_timeout = w5100_tx_timeout,
1045 .ndo_set_rx_mode = w5100_set_rx_mode,
1046 .ndo_set_mac_address = w5100_set_macaddr,
1047 .ndo_validate_addr = eth_validate_addr,
1048 .ndo_change_mtu = eth_change_mtu,
1049 };
1050
1051 static int w5100_mmio_probe(struct platform_device *pdev)
1052 {
1053 struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
1054 const void *mac_addr = NULL;
1055 struct resource *mem;
1056 const struct w5100_ops *ops;
1057 int irq;
1058
1059 if (data && is_valid_ether_addr(data->mac_addr))
1060 mac_addr = data->mac_addr;
1061
1062 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1063 if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
1064 ops = &w5100_mmio_indirect_ops;
1065 else
1066 ops = &w5100_mmio_direct_ops;
1067
1068 irq = platform_get_irq(pdev, 0);
1069 if (irq < 0)
1070 return irq;
1071
1072 return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
1073 mac_addr, irq, data ? data->link_gpio : -EINVAL);
1074 }
1075
1076 static int w5100_mmio_remove(struct platform_device *pdev)
1077 {
1078 return w5100_remove(&pdev->dev);
1079 }
1080
1081 void *w5100_ops_priv(const struct net_device *ndev)
1082 {
1083 return netdev_priv(ndev) +
1084 ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
1085 }
1086 EXPORT_SYMBOL_GPL(w5100_ops_priv);
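
/*
 * Layout assumed by w5100_ops_priv() and by the allocation in w5100_probe():
 * the netdev private area holds struct w5100_priv first, followed (aligned
 * to NETDEV_ALIGN) by sizeof_ops_priv bytes of bus-specific data, e.g.
 * struct w5100_mmio_priv for the platform devices handled in this file.
 *
 *	netdev_priv(ndev)
 *	+-------------------+-- pad --+------------------------+
 *	| struct w5100_priv |         | bus ops private data   |
 *	+-------------------+---------+------------------------+
 */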
1087
1088 int w5100_probe(struct device *dev, const struct w5100_ops *ops,
1089 int sizeof_ops_priv, const void *mac_addr, int irq,
1090 int link_gpio)
1091 {
1092 struct w5100_priv *priv;
1093 struct net_device *ndev;
1094 int err;
1095 size_t alloc_size;
1096
1097 alloc_size = sizeof(*priv);
1098 if (sizeof_ops_priv) {
1099 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
1100 alloc_size += sizeof_ops_priv;
1101 }
1102 alloc_size += NETDEV_ALIGN - 1;
1103
1104 ndev = alloc_etherdev(alloc_size);
1105 if (!ndev)
1106 return -ENOMEM;
1107 SET_NETDEV_DEV(ndev, dev);
1108 dev_set_drvdata(dev, ndev);
1109 priv = netdev_priv(ndev);
1110
1111 switch (ops->chip_id) {
1112 case W5100:
1113 priv->s0_regs = W5100_S0_REGS;
1114 priv->s0_tx_buf = W5100_TX_MEM_START;
1115 priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
1116 priv->s0_rx_buf = W5100_RX_MEM_START;
1117 priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
1118 break;
1119 case W5200:
1120 priv->s0_regs = W5200_S0_REGS;
1121 priv->s0_tx_buf = W5200_TX_MEM_START;
1122 priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
1123 priv->s0_rx_buf = W5200_RX_MEM_START;
1124 priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
1125 break;
1126 case W5500:
1127 priv->s0_regs = W5500_S0_REGS;
1128 priv->s0_tx_buf = W5500_TX_MEM_START;
1129 priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
1130 priv->s0_rx_buf = W5500_RX_MEM_START;
1131 priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
1132 break;
1133 default:
1134 err = -EINVAL;
1135 goto err_register;
1136 }
1137
1138 priv->ndev = ndev;
1139 priv->ops = ops;
1140 priv->irq = irq;
1141 priv->link_gpio = link_gpio;
1142
1143 ndev->netdev_ops = &w5100_netdev_ops;
1144 ndev->ethtool_ops = &w5100_ethtool_ops;
1145 netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
1146
1147 /* This chip doesn't support VLAN packets with normal MTU,
1148 * so disable VLAN for this device.
1149 */
1150 ndev->features |= NETIF_F_VLAN_CHALLENGED;
1151
1152 err = register_netdev(ndev);
1153 if (err < 0)
1154 goto err_register;
1155
1156 priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
1157 if (!priv->xfer_wq) {
1158 err = -ENOMEM;
1159 goto err_wq;
1160 }
1161
1162 INIT_WORK(&priv->rx_work, w5100_rx_work);
1163 INIT_WORK(&priv->tx_work, w5100_tx_work);
1164 INIT_WORK(&priv->setrx_work, w5100_setrx_work);
1165 INIT_WORK(&priv->restart_work, w5100_restart_work);
1166
1167 if (mac_addr)
1168 memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
1169 else
1170 eth_hw_addr_random(ndev);
1171
1172 if (priv->ops->init) {
1173 err = priv->ops->init(priv->ndev);
1174 if (err)
1175 goto err_hw;
1176 }
1177
1178 err = w5100_hw_reset(priv);
1179 if (err)
1180 goto err_hw;
1181
1182 if (ops->may_sleep) {
1183 err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
1184 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1185 netdev_name(ndev), ndev);
1186 } else {
1187 err = request_irq(priv->irq, w5100_interrupt,
1188 IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
1189 }
1190 if (err)
1191 goto err_hw;
1192
1193 if (gpio_is_valid(priv->link_gpio)) {
1194 char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
1195
1196 if (!link_name) {
1197 err = -ENOMEM;
1198 goto err_gpio;
1199 }
1200 snprintf(link_name, 16, "%s-link", netdev_name(ndev));
1201 priv->link_irq = gpio_to_irq(priv->link_gpio);
1202 if (request_any_context_irq(priv->link_irq, w5100_detect_link,
1203 IRQF_TRIGGER_RISING |
1204 IRQF_TRIGGER_FALLING,
1205 link_name, priv->ndev) < 0)
1206 priv->link_gpio = -EINVAL;
1207 }
1208
1209 return 0;
1210
1211 err_gpio:
1212 free_irq(priv->irq, ndev);
1213 err_hw:
1214 destroy_workqueue(priv->xfer_wq);
1215 err_wq:
1216 unregister_netdev(ndev);
1217 err_register:
1218 free_netdev(ndev);
1219 return err;
1220 }
1221 EXPORT_SYMBOL_GPL(w5100_probe);
1222
1223 int w5100_remove(struct device *dev)
1224 {
1225 struct net_device *ndev = dev_get_drvdata(dev);
1226 struct w5100_priv *priv = netdev_priv(ndev);
1227
1228 w5100_hw_reset(priv);
1229 free_irq(priv->irq, ndev);
1230 if (gpio_is_valid(priv->link_gpio))
1231 free_irq(priv->link_irq, ndev);
1232
1233 flush_work(&priv->setrx_work);
1234 flush_work(&priv->restart_work);
1235 destroy_workqueue(priv->xfer_wq);
1236
1237 unregister_netdev(ndev);
1238 free_netdev(ndev);
1239 return 0;
1240 }
1241 EXPORT_SYMBOL_GPL(w5100_remove);
1242
1243 #ifdef CONFIG_PM_SLEEP
1244 static int w5100_suspend(struct device *dev)
1245 {
1246 struct net_device *ndev = dev_get_drvdata(dev);
1247 struct w5100_priv *priv = netdev_priv(ndev);
1248
1249 if (netif_running(ndev)) {
1250 netif_carrier_off(ndev);
1251 netif_device_detach(ndev);
1252
1253 w5100_hw_close(priv);
1254 }
1255 return 0;
1256 }
1257
1258 static int w5100_resume(struct device *dev)
1259 {
1260 struct net_device *ndev = dev_get_drvdata(dev);
1261 struct w5100_priv *priv = netdev_priv(ndev);
1262
1263 if (netif_running(ndev)) {
1264 w5100_hw_reset(priv);
1265 w5100_hw_start(priv);
1266
1267 netif_device_attach(ndev);
1268 if (!gpio_is_valid(priv->link_gpio) ||
1269 gpio_get_value(priv->link_gpio) != 0)
1270 netif_carrier_on(ndev);
1271 }
1272 return 0;
1273 }
1274 #endif /* CONFIG_PM_SLEEP */
1275
1276 SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
1277 EXPORT_SYMBOL_GPL(w5100_pm_ops);
1278
1279 static struct platform_driver w5100_mmio_driver = {
1280 .driver = {
1281 .name = DRV_NAME,
1282 .pm = &w5100_pm_ops,
1283 },
1284 .probe = w5100_mmio_probe,
1285 .remove = w5100_mmio_remove,
1286 };
1287 module_platform_driver(w5100_mmio_driver);