/**
 * drivers/net/ks8851_mll.c
 * Copyright (c) 2009 Micrel Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/**
 * Supports:
 * KS8851 16bit MLL chip from Micrel Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/io.h>

#define DRV_NAME "ks8851_mll"

static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_RECV_FRAMES 32
#define MAX_BUF_SIZE 2048
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000

#define KS_CCR 0x08
#define CCR_EEPROM (1 << 9)
#define CCR_SPI (1 << 8)
#define CCR_8BIT (1 << 7)
#define CCR_16BIT (1 << 6)
#define CCR_32BIT (1 << 5)
#define CCR_SHARED (1 << 4)
#define CCR_32PIN (1 << 0)

/* MAC address registers */
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14

#define KS_OBCR 0x20
#define OBCR_ODS_16MA (1 << 6)

#define KS_EEPCR 0x22
#define EEPCR_EESA (1 << 4)
#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)

#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
#define MBIR_RXMBF (1 << 4)
#define MBIR_RXMBFA (1 << 3)

#define KS_GRR 0x26
#define GRR_QMU (1 << 1)
#define GRR_GSR (1 << 0)

#define KS_WFCR 0x2A
#define WFCR_MPRXE (1 << 7)
#define WFCR_WF3E (1 << 3)
#define WFCR_WF2E (1 << 2)
#define WFCR_WF1E (1 << 1)
#define WFCR_WF0E (1 << 0)

#define KS_WF0CRC0 0x30
#define KS_WF0CRC1 0x32
#define KS_WF0BM0 0x34
#define KS_WF0BM1 0x36
#define KS_WF0BM2 0x38
#define KS_WF0BM3 0x3A

#define KS_WF1CRC0 0x40
#define KS_WF1CRC1 0x42
#define KS_WF1BM0 0x44
#define KS_WF1BM1 0x46
#define KS_WF1BM2 0x48
#define KS_WF1BM3 0x4A

#define KS_WF2CRC0 0x50
#define KS_WF2CRC1 0x52
#define KS_WF2BM0 0x54
#define KS_WF2BM1 0x56
#define KS_WF2BM2 0x58
#define KS_WF2BM3 0x5A

#define KS_WF3CRC0 0x60
#define KS_WF3CRC1 0x62
#define KS_WF3BM0 0x64
#define KS_WF3BM1 0x66
#define KS_WF3BM2 0x68
#define KS_WF3BM3 0x6A

#define KS_TXCR 0x70
#define TXCR_TCGICMP (1 << 8)
#define TXCR_TCGUDP (1 << 7)
#define TXCR_TCGTCP (1 << 6)
#define TXCR_TCGIP (1 << 5)
#define TXCR_FTXQ (1 << 4)
#define TXCR_TXFCE (1 << 3)
#define TXCR_TXPE (1 << 2)
#define TXCR_TXCRC (1 << 1)
#define TXCR_TXE (1 << 0)

#define KS_TXSR 0x72
#define TXSR_TXLC (1 << 13)
#define TXSR_TXMC (1 << 12)
#define TXSR_TXFID_MASK (0x3f << 0)
#define TXSR_TXFID_SHIFT (0)
#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)


#define KS_RXCR1 0x74
#define RXCR1_FRXQ (1 << 15)
#define RXCR1_RXUDPFCC (1 << 14)
#define RXCR1_RXTCPFCC (1 << 13)
#define RXCR1_RXIPFCC (1 << 12)
#define RXCR1_RXPAFMA (1 << 11)
#define RXCR1_RXFCE (1 << 10)
#define RXCR1_RXEFE (1 << 9)
#define RXCR1_RXMAFMA (1 << 8)
#define RXCR1_RXBE (1 << 7)
#define RXCR1_RXME (1 << 6)
#define RXCR1_RXUE (1 << 5)
#define RXCR1_RXAE (1 << 4)
#define RXCR1_RXINVF (1 << 1)
#define RXCR1_RXE (1 << 0)
#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
			   RXCR1_RXMAFMA | RXCR1_RXPAFMA)

#define KS_RXCR2 0x76
#define RXCR2_SRDBL_MASK (0x7 << 5)
#define RXCR2_SRDBL_SHIFT (5)
#define RXCR2_SRDBL_4B (0x0 << 5)
#define RXCR2_SRDBL_8B (0x1 << 5)
#define RXCR2_SRDBL_16B (0x2 << 5)
#define RXCR2_SRDBL_32B (0x3 << 5)
/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
#define RXCR2_IUFFP (1 << 4)
#define RXCR2_RXIUFCEZ (1 << 3)
#define RXCR2_UDPLFE (1 << 2)
#define RXCR2_RXICMPFCC (1 << 1)
#define RXCR2_RXSAF (1 << 0)

#define KS_TXMIR 0x78

#define KS_RXFHSR 0x7C
#define RXFSHR_RXFV (1 << 15)
#define RXFSHR_RXICMPFCS (1 << 13)
#define RXFSHR_RXIPFCS (1 << 12)
#define RXFSHR_RXTCPFCS (1 << 11)
#define RXFSHR_RXUDPFCS (1 << 10)
#define RXFSHR_RXBF (1 << 7)
#define RXFSHR_RXMF (1 << 6)
#define RXFSHR_RXUF (1 << 5)
#define RXFSHR_RXMR (1 << 4)
#define RXFSHR_RXFT (1 << 3)
#define RXFSHR_RXFTL (1 << 2)
#define RXFSHR_RXRF (1 << 1)
#define RXFSHR_RXCE (1 << 0)
#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
		    RXFSHR_RXFTL | RXFSHR_RXMR |\
		    RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
		    RXFSHR_RXTCPFCS)
#define KS_RXFHBCR 0x7E
#define RXFHBCR_CNT_MASK 0x0FFF

#define KS_TXQCR 0x80
#define TXQCR_AETFE (1 << 2)
#define TXQCR_TXQMAM (1 << 1)
#define TXQCR_METFE (1 << 0)

#define KS_RXQCR 0x82
#define RXQCR_RXDTTS (1 << 12)
#define RXQCR_RXDBCTS (1 << 11)
#define RXQCR_RXFCTS (1 << 10)
#define RXQCR_RXIPHTOE (1 << 9)
#define RXQCR_RXDTTE (1 << 7)
#define RXQCR_RXDBCTE (1 << 6)
#define RXQCR_RXFCTE (1 << 5)
#define RXQCR_ADRFE (1 << 4)
#define RXQCR_SDA (1 << 3)
#define RXQCR_RRXEF (1 << 0)
#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)

#define KS_TXFDPR 0x84
#define TXFDPR_TXFPAI (1 << 14)
#define TXFDPR_TXFP_MASK (0x7ff << 0)
#define TXFDPR_TXFP_SHIFT (0)

#define KS_RXFDPR 0x86
#define RXFDPR_RXFPAI (1 << 14)

#define KS_RXDTTR 0x8C
#define KS_RXDBCTR 0x8E

#define KS_IER 0x90
#define KS_ISR 0x92
#define IRQ_LCI (1 << 15)
#define IRQ_TXI (1 << 14)
#define IRQ_RXI (1 << 13)
#define IRQ_RXOI (1 << 11)
#define IRQ_TXPSI (1 << 9)
#define IRQ_RXPSI (1 << 8)
#define IRQ_TXSAI (1 << 6)
#define IRQ_RXWFDI (1 << 5)
#define IRQ_RXMPDI (1 << 4)
#define IRQ_LDI (1 << 3)
#define IRQ_EDI (1 << 2)
#define IRQ_SPIBEI (1 << 1)
#define IRQ_DEDI (1 << 0)

#define KS_RXFCTR 0x9C
#define RXFCTR_THRESHOLD_MASK 0x00FF

#define KS_RXFC 0x9D
#define RXFCTR_RXFC_MASK (0xff << 8)
#define RXFCTR_RXFC_SHIFT (8)
#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
#define RXFCTR_RXFCT_MASK (0xff << 0)
#define RXFCTR_RXFCT_SHIFT (0)

#define KS_TXNTFSR 0x9E

#define KS_MAHTR0 0xA0
#define KS_MAHTR1 0xA2
#define KS_MAHTR2 0xA4
#define KS_MAHTR3 0xA6

#define KS_FCLWR 0xB0
#define KS_FCHWR 0xB2
#define KS_FCOWR 0xB4

#define KS_CIDER 0xC0
#define CIDER_ID 0x8870
#define CIDER_REV_MASK (0x7 << 1)
#define CIDER_REV_SHIFT (1)
#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)

#define KS_CGCR 0xC6
#define KS_IACR 0xC8
#define IACR_RDEN (1 << 12)
#define IACR_TSEL_MASK (0x3 << 10)
#define IACR_TSEL_SHIFT (10)
#define IACR_TSEL_MIB (0x3 << 10)
#define IACR_ADDR_MASK (0x1f << 0)
#define IACR_ADDR_SHIFT (0)

#define KS_IADLR 0xD0
#define KS_IAHDR 0xD2

#define KS_PMECR 0xD4
#define PMECR_PME_DELAY (1 << 14)
#define PMECR_PME_POL (1 << 12)
#define PMECR_WOL_WAKEUP (1 << 11)
#define PMECR_WOL_MAGICPKT (1 << 10)
#define PMECR_WOL_LINKUP (1 << 9)
#define PMECR_WOL_ENERGY (1 << 8)
#define PMECR_AUTO_WAKE_EN (1 << 7)
#define PMECR_WAKEUP_NORMAL (1 << 6)
#define PMECR_WKEVT_MASK (0xf << 2)
#define PMECR_WKEVT_SHIFT (2)
#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
#define PMECR_WKEVT_ENERGY (0x1 << 2)
#define PMECR_WKEVT_LINK (0x2 << 2)
#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
#define PMECR_WKEVT_FRAME (0x8 << 2)
#define PMECR_PM_MASK (0x3 << 0)
#define PMECR_PM_SHIFT (0)
#define PMECR_PM_NORMAL (0x0 << 0)
#define PMECR_PM_ENERGY (0x1 << 0)
#define PMECR_PM_SOFTDOWN (0x2 << 0)
#define PMECR_PM_POWERSAVE (0x3 << 0)

/* Standard MII PHY data */
#define KS_P1MBCR 0xE4
#define P1MBCR_FORCE_FDX (1 << 8)

#define KS_P1MBSR 0xE6
#define P1MBSR_AN_COMPLETE (1 << 5)
#define P1MBSR_AN_CAPABLE (1 << 3)
#define P1MBSR_LINK_UP (1 << 2)

#define KS_PHY1ILR 0xE8
#define KS_PHY1IHR 0xEA
#define KS_P1ANAR 0xEC
#define KS_P1ANLPR 0xEE

#define KS_P1SCLMD 0xF4
#define P1SCLMD_LEDOFF (1 << 15)
#define P1SCLMD_TXIDS (1 << 14)
#define P1SCLMD_RESTARTAN (1 << 13)
#define P1SCLMD_DISAUTOMDIX (1 << 10)
#define P1SCLMD_FORCEMDIX (1 << 9)
#define P1SCLMD_AUTONEGEN (1 << 7)
#define P1SCLMD_FORCE100 (1 << 6)
#define P1SCLMD_FORCEFDX (1 << 5)
#define P1SCLMD_ADV_FLOW (1 << 4)
#define P1SCLMD_ADV_100BT_FDX (1 << 3)
#define P1SCLMD_ADV_100BT_HDX (1 << 2)
#define P1SCLMD_ADV_10BT_FDX (1 << 1)
#define P1SCLMD_ADV_10BT_HDX (1 << 0)

#define KS_P1CR 0xF6
#define P1CR_HP_MDIX (1 << 15)
#define P1CR_REV_POL (1 << 13)
#define P1CR_OP_100M (1 << 10)
#define P1CR_OP_FDX (1 << 9)
#define P1CR_OP_MDI (1 << 7)
#define P1CR_AN_DONE (1 << 6)
#define P1CR_LINK_GOOD (1 << 5)
#define P1CR_PNTR_FLOW (1 << 4)
#define P1CR_PNTR_100BT_FDX (1 << 3)
#define P1CR_PNTR_100BT_HDX (1 << 2)
#define P1CR_PNTR_10BT_FDX (1 << 1)
#define P1CR_PNTR_10BT_HDX (1 << 0)

/* TX Frame control */

#define TXFR_TXIC (1 << 15)
#define TXFR_TXFID_MASK (0x3f << 0)
#define TXFR_TXFID_SHIFT (0)

#define KS_P1SR 0xF8
#define P1SR_HP_MDIX (1 << 15)
#define P1SR_REV_POL (1 << 13)
#define P1SR_OP_100M (1 << 10)
#define P1SR_OP_FDX (1 << 9)
#define P1SR_OP_MDI (1 << 7)
#define P1SR_AN_DONE (1 << 6)
#define P1SR_LINK_GOOD (1 << 5)
#define P1SR_PNTR_FLOW (1 << 4)
#define P1SR_PNTR_100BT_FDX (1 << 3)
#define P1SR_PNTR_100BT_HDX (1 << 2)
#define P1SR_PNTR_10BT_FDX (1 << 1)
#define P1SR_PNTR_10BT_HDX (1 << 0)

#define ENUM_BUS_NONE 0
#define ENUM_BUS_8BIT 1
#define ENUM_BUS_16BIT 2
#define ENUM_BUS_32BIT 3

#define MAX_MCAST_LST 32
#define HW_MCAST_SIZE 8

/**
 * union ks_tx_hdr - tx header data
 * @txb: The header as bytes
 * @txw: The header as 16bit, little-endian words
 *
 * A dual representation of the tx header data to allow
 * access to individual bytes, and to allow 16bit accesses
 * with 16bit alignment.
 */
union ks_tx_hdr {
	u8 txb[4];
	__le16 txw[2];
};

/**
 * struct ks_net - KS8851 driver private data
 * @netdev : The network device we're bound to
 * @hw_addr : start address of data register.
 * @hw_addr_cmd : start address of command register.
 * @txh : temporary buffer to save status/length.
 * @lock : Lock to ensure that the device is not accessed when busy.
 * @pdev : Pointer to platform device.
 * @mii : The MII state information for the mii calls.
 * @frame_head_info : frame header information for multi-pkt rx.
 * @statelock : Lock on this structure for tx list.
 * @msg_enable : The message flags controlling driver output (see ethtool).
 * @frame_cnt : number of frames received.
 * @bus_width : i/o bus width.
 * @irq : irq number assigned to this device.
 * @rc_rxqcr : Cached copy of KS_RXQCR.
 * @rc_txcr : Cached copy of KS_TXCR.
 * @rc_ier : Cached copy of KS_IER.
 * @sharedbus : Multiplex (addr and data bus) mode indicator.
 * @cmd_reg_cache : command register cached.
 * @cmd_reg_cache_int : command register cached. Used in the irq handler.
 * @promiscuous : promiscuous mode indicator.
 * @all_mcast : multicast indicator.
 * @mcast_lst_size : size of multicast list.
 * @mcast_lst : multicast list.
 * @mcast_bits : multicast enabled.
 * @mac_addr : MAC address assigned to this device.
 * @fid : frame id.
 * @extra_byte : number of extra bytes prepended to an rx pkt.
 * @enabled : indicator that this device works.
 *
 * The @lock ensures that the chip is protected when certain operations are
 * in progress. When the read or write packet transfer is in progress, most
 * of the chip registers are not accessible until the transfer is finished and
 * the DMA has been de-asserted.
 *
 * The @statelock is used to protect information in the structure which may
 * need to be accessed via several sources, such as the network driver layer
 * or one of the work queues.
 *
 */

/* Receive multiplex framer header info */
struct type_frame_head {
	u16 sts;	/* Frame status */
	u16 len;	/* Byte count */
};

struct ks_net {
	struct net_device *netdev;
	void __iomem *hw_addr;
	void __iomem *hw_addr_cmd;
	union ks_tx_hdr txh ____cacheline_aligned;
	struct mutex lock;	/* mutex protecting chip register access */
	struct platform_device *pdev;
	struct mii_if_info mii;
	struct type_frame_head *frame_head_info;
	spinlock_t statelock;
	u32 msg_enable;
	u32 frame_cnt;
	int bus_width;
	int irq;

	u16 rc_rxqcr;
	u16 rc_txcr;
	u16 rc_ier;
	u16 sharedbus;
	u16 cmd_reg_cache;
	u16 cmd_reg_cache_int;
	u16 promiscuous;
	u16 all_mcast;
	u16 mcast_lst_size;
	u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
	u8 mcast_bits[HW_MCAST_SIZE];
	u8 mac_addr[6];
	u8 fid;
	u8 extra_byte;
	u8 enabled;
};

static int msg_enable;

#define BE3 0x8000	/* Byte Enable 3 */
#define BE2 0x4000	/* Byte Enable 2 */
#define BE1 0x2000	/* Byte Enable 1 */
#define BE0 0x1000	/* Byte Enable 0 */

/**
 * register read/write calls.
 *
 * All these calls issue transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data (RX/TX FIFO accesses).
 */

/**
 * ks_rdreg8 - read 8 bit register from device
 * @ks : The chip information
 * @offset: The register address
 *
 * Read an 8bit register from the chip, returning the result
 */
static u8 ks_rdreg8(struct ks_net *ks, int offset)
{
	u16 data;
	u8 shift_bit = offset & 0x03;
	u8 shift_data = (offset & 1) << 3;
	ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	data = ioread16(ks->hw_addr);
	return (u8)(data >> shift_data);
}

/**
 * ks_rdreg16 - read 16 bit register from device
 * @ks : The chip information
 * @offset: The register address
 *
 * Read a 16bit register from the chip, returning the result
 */

static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	return ioread16(ks->hw_addr);
}
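
/*
 * Example (for illustration, derived from the code above): the command word
 * selects which byte lanes of the 16-bit data window are active. Reading
 * KS_RXCR1 (0x74), where 0x74 & 0x02 == 0, issues the command
 * 0x74 | (BE1 | BE0) == 0x3074 (low word lane); reading KS_RXCR2 (0x76),
 * where 0x76 & 0x02 == 2, issues 0x76 | ((BE1 | BE0) << 2) == 0xc076
 * (BE3 | BE2, high word lane).
 */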

/**
 * ks_wrreg8 - write 8bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */
static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
{
	u8 shift_bit = (offset & 0x03);
	u16 value_write = (u16)(value << ((offset & 1) << 3));
	ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value_write, ks->hw_addr);
}

/**
 * ks_wrreg16 - write 16bit register value to chip
 * @ks: The chip information
 * @offset: The register address
 * @value: The value to write
 *
 */

static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
	ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
	iowrite16(value, ks->hw_addr);
}

/**
 * ks_inblk - read a block of data from the QMU. This is called after the
 *	pseudo DMA mode is enabled.
 * @ks: The chip state
 * @wptr: buffer address to save data
 * @len: length in bytes to read
 *
 */
static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		*wptr++ = (u16)ioread16(ks->hw_addr);
}

/**
 * ks_outblk - write data to the QMU. This is called after the pseudo DMA
 *	mode is enabled.
 * @ks: The chip information
 * @wptr: buffer address
 * @len: length in bytes to write
 *
 */
static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
	len >>= 1;
	while (len--)
		iowrite16(*wptr++, ks->hw_addr);
}

static void ks_disable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, 0x0000);
}  /* ks_disable_int */

static void ks_enable_int(struct ks_net *ks)
{
	ks_wrreg16(ks, KS_IER, ks->rc_ier);
}  /* ks_enable_int */

/**
 * ks_tx_fifo_space - return the available hardware buffer size.
 * @ks: The chip information
 *
 */
static inline u16 ks_tx_fifo_space(struct ks_net *ks)
{
	return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
}

/**
 * ks_save_cmd_reg - save the command register from the cache.
 * @ks: The chip information
 *
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	/* The KS8851 MLL has a bug reading back the command register,
	 * so rely on software to save the content of the command register.
	 */
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}

/**
 * ks_restore_cmd_reg - restore the command register from the cache and
 *	write to hardware register.
 * @ks: The chip information
 *
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}

/**
 * ks_set_powermode - set power mode of the device
 * @ks: The chip information
 * @pwrmode: The power mode value to write to KS_PMECR.
 *
 * Change the power mode of the chip.
 */
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}

/**
 * ks_read_config - read chip configuration of bus width.
 * @ks: The chip information
 *
 */
static void ks_read_config(struct ks_net *ks)
{
	u16 reg_data = 0;

	/* Regardless of bus width, an 8 bit read should always work. */
	reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
	reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;

	/* addr/data bus are multiplexed */
	ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;

	/* There is garbage data when reading from the QMU,
	 * depending on the bus width.
	 */

	if (reg_data & CCR_8BIT) {
		ks->bus_width = ENUM_BUS_8BIT;
		ks->extra_byte = 1;
	} else if (reg_data & CCR_16BIT) {
		ks->bus_width = ENUM_BUS_16BIT;
		ks->extra_byte = 2;
	} else {
		ks->bus_width = ENUM_BUS_32BIT;
		ks->extra_byte = 4;
	}
}

/**
 * ks_soft_reset - issue a soft reset to the device
 * @ks: The device state.
 * @op: The bit(s) to set in the GRR
 *
 * Issue the relevant soft-reset command to the device's GRR register
 * specified by @op.
 *
 * Note, the delays are in there as a caution to ensure that the reset
 * has time to take effect and then complete. Since the datasheet does
 * not currently specify the exact sequence, we have chosen something
 * that seems to work with our device.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}

void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	w = ks_rdreg16(ks, KS_TXCR);
	/* Enables QMU Transmit (TXCR). */
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/*
	 * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
	 * Enable
	 */

	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* Enables QMU Receive (RXCR1). */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}  /* ks_enable_qmu */

static void ks_disable_qmu(struct ks_net *ks)
{
	u16 w;

	w = ks_rdreg16(ks, KS_TXCR);

	/* Disables QMU Transmit (TXCR). */
	w &= ~TXCR_TXE;
	ks_wrreg16(ks, KS_TXCR, w);

	/* Disables QMU Receive (RXCR1). */
	w = ks_rdreg16(ks, KS_RXCR1);
	w &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, w);

	ks->enabled = false;

}  /* ks_disable_qmu */

/**
 * ks_read_qmu - read 1 pkt data from the QMU.
 * @ks: The chip information
 * @buf: buffer address to save 1 pkt
 * @len: Pkt length
 * Here is the sequence to read 1 pkt:
 *	1. set pseudo DMA mode
 *	2. read prepend data
 *	3. read pkt data
 *	4. reset pseudo DMA mode
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	u32 r = ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set pseudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);

	/* 2. read prepend data */
	/**
	 * read 4 + extra bytes and discard them.
	 * extra bytes for dummy, 2 for status, 2 for len
	 */

	/* the odd dummy byte (r) is only present on an 8-bit bus */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read pkt data */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
}
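
/*
 * Example (for illustration): on a 16-bit bus ks->extra_byte is 2, so r == 0
 * and w == 2, and ks_inblk() above discards 2 + 2 + 2 = 6 bytes of prepend
 * data (dummy, status, byte count) before the frame itself is read. On an
 * 8-bit bus extra_byte is 1, so the single dummy byte is consumed by the
 * ioread8() and only the 4 status/byte-count bytes are discarded.
 */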

/**
 * ks_rcv - read multiple pkts data from the QMU.
 * @ks: The chip information
 * @netdev: The network device being opened.
 *
 * Read all of the header information before reading the pkt content.
 * It is not allowed to read only part of the pkts left in the QMU after
 * issuing the interrupt ack.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32 i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		skb = dev_alloc_skb(frame_hdr->len + 16);
		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
			skb_reserve(skb, 2);
			/* read data block including CRC 4 bytes */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			skb_put(skb, frame_hdr->len);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
		} else {
			pr_err("%s: err:skb alloc\n", __func__);
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			if (skb)
				dev_kfree_skb_irq(skb);
		}
		frame_hdr++;
	}
}

/**
 * ks_update_link_status - link status update.
 * @netdev: The network device being opened.
 * @ks: The chip information
 *
 */

static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
{
	/* check the status of the link */
	u32 link_up_status;
	if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
		netif_carrier_on(netdev);
		link_up_status = true;
	} else {
		netif_carrier_off(netdev);
		link_up_status = false;
	}
	netif_dbg(ks, link, ks->netdev,
		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
}

/**
 * ks_irq - device interrupt handler
 * @irq: Interrupt number passed from the IRQ handler.
 * @pw: The private word passed to request_irq(), our struct ks_net.
 *
 * This is the handler invoked to find out what happened
 *
 * Read the interrupt status, work out what needs to be done and then clear
 * any of the interrupts that are not needed.
 */

static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	u16 status;

	/* this should be the first thing in the IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		ks_restore_cmd_reg(ks);
		return IRQ_NONE;
	}

	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {

		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	/* this should be the last thing in the IRQ handler */
	ks_restore_cmd_reg(ks);
	return IRQ_HANDLED;
}

/**
 * ks_net_open - open network device
 * @netdev: The network device being opened.
 *
 * Called when the network device is marked active, such as a user executing
 * 'ifconfig up' on the device.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define KS_INT_FLAGS	(IRQF_DISABLED|IRQF_TRIGGER_LOW)
	/* lock the card, even if we may not actually do anything
	 * else at the moment.
	 */

	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	/* reset the HW */
	err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}

/**
 * ks_net_stop - close network device
 * @netdev: The device being closed.
 *
 * Called to close down a network device which has been active. Cancel any
 * work, shutdown the RX and TX process and then place the chip into a low
 * power state whilst it is not being used.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(ks->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}

/**
 * ks_write_qmu - write 1 pkt data to the QMU.
 * @ks: The chip information
 * @pdata: buffer address of the pkt to send
 * @len: Pkt length in bytes
 * Here is the sequence to write 1 pkt:
 *	1. set pseudo DMA mode
 *	2. write status/length info
 *	3. write pkt data
 *	4. reset pseudo DMA mode
 *	5. enqueue the pkt (move it from the TX buffer into the TXQ)
 *	6. wait until the pkt is out
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset pseudo DMA mode */
	ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx (move the pkt from the TX buffer into the TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until TXQCR_METFE is auto-cleared */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
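
/*
 * Example (for illustration): txh.txw[0] is the TX control word and could
 * carry TXFR_TXIC (request a completion interrupt) plus a frame ID in
 * TXFR_TXFID_MASK; this driver leaves it at zero and instead polls for
 * TXQCR_METFE to clear. txh.txw[1] is the little-endian byte count of the
 * frame that follows.
 */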

/**
 * ks_start_xmit - transmit packet
 * @skb : The buffer to transmit
 * @netdev : The device used to transmit the packet.
 *
 * Called by the network layer to transmit the @skb.
 * spin_lock_irqsave is required because tx and rx should be mutually
 * exclusive. So while tx is in progress, prevent the IRQ interrupt from
 * happening.
 */
static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);

	disable_irq(netdev->irq);
	ks_disable_int(ks);
	spin_lock(&ks->statelock);

	/* Extra space is required:
	 * 4 bytes for alignment, 4 for status/length, 4 for CRC
	 */

	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		dev_kfree_skb(skb);
	} else
		retv = NETDEV_TX_BUSY;
	spin_unlock(&ks->statelock);
	ks_enable_int(ks);
	enable_irq(netdev->irq);
	return retv;
}

/**
 * ks_start_rx - ready to serve pkts
 * @ks : The chip information
 *
 */
static void ks_start_rx(struct ks_net *ks)
{
	u16 cntl;

	/* Enables QMU Receive (RXCR1). */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl |= RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);
}  /* ks_start_rx */

/**
 * ks_stop_rx - stop serving pkts
 * @ks : The chip information
 *
 */
static void ks_stop_rx(struct ks_net *ks)
{
	u16 cntl;

	/* Disables QMU Receive (RXCR1). */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, cntl);

}  /* ks_stop_rx */

static unsigned long const ethernet_polynomial = 0x04c11db7U;

static unsigned long ether_gen_crc(int length, u8 *data)
{
	long crc = -1;
	while (--length >= 0) {
		u8 current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
					ethernet_polynomial : 0);
		}
	}
	return (unsigned long)crc;
}  /* ether_gen_crc */

/**
 * ks_set_grpaddr - set multicast information
 * @ks : The chip information
 */

static void ks_set_grpaddr(struct ks_net *ks)
{
	u8 i;
	u32 index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	for (i = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				(ks->mcast_bits[i] << 8) |
				ks->mcast_bits[i - 1]);
		}
	}
}  /* ks_set_grpaddr */
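
/*
 * Example (for illustration): the top six CRC bits select one of 64
 * hash-table bits. A position of 0x2b, say, sets bit 3 (0x2b & 7) of
 * mcast_bits[5] (0x2b >> 3), which the loop above writes into the high
 * byte of KS_MAHTR2.
 */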

/*
 * ks_clear_mcast - clear multicast information
 *
 * @ks : The chip information
 * This routine removes all mcast addresses set in the hardware.
 */

static void ks_clear_mcast(struct ks_net *ks)
{
	u16 i, mcast_size;
	for (i = 0; i < HW_MCAST_SIZE; i++)
		ks->mcast_bits[i] = 0;

	mcast_size = HW_MCAST_SIZE >> 2;
	for (i = 0; i < mcast_size; i++)
		ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
}

static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16 cntl;
	ks->promiscuous = promiscuous_mode;
	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);

	cntl &= ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* Enable Promiscuous mode */
		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* Disable Promiscuous mode (default normal mode) */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);

}  /* ks_set_promis */

static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16 cntl;

	ks->all_mcast = mcast;
	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_FILTER_MASK;
	if (mcast)
		/* Enable "Perfect with Multicast address passed mode" */
		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else
		/**
		 * Disable "Perfect with Multicast address passed
		 * mode" (normal mode).
		 */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}  /* ks_set_mcast */

static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all mcast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/**
			 * List too big to support so
			 * turn on all mcast mode.
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}  /* ks_set_rx_mode */

static void ks_set_mac(struct ks_net *ks, u8 *data)
{
	u16 *pw = (u16 *)data;
	u16 w, u;

	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARH, w);

	u = *pw++;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARM, w);

	u = *pw;
	w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
	ks_wrreg16(ks, KS_MARL, w);

	memcpy(ks->mac_addr, data, 6);

	if (ks->enabled)
		ks_start_rx(ks);
}
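
/*
 * Example (for illustration, assuming a little-endian host): for the default
 * MAC 00:10:A1:86:95:11 the byte swapping above writes 0x0010 to KS_MARH,
 * 0xA186 to KS_MARM and 0x9511 to KS_MARL, i.e. the address is stored
 * high word first with each 16-bit register holding its bytes in network
 * order.
 */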

static int ks_set_mac_address(struct net_device *netdev, void *paddr)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct sockaddr *addr = paddr;
	u8 *da;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	da = (u8 *)netdev->dev_addr;

	ks_set_mac(ks, da);
	return 0;
}

static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ks_net *ks = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
}

static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/* ethtool support */

static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}

static u32 ks_get_msglevel(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return ks->msg_enable;
}

static void ks_set_msglevel(struct net_device *netdev, u32 to)
{
	struct ks_net *ks = netdev_priv(netdev);
	ks->msg_enable = to;
}

static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_gset(&ks->mii, cmd);
}

static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_ethtool_sset(&ks->mii, cmd);
}

static u32 ks_get_link(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_link_ok(&ks->mii);
}

static int ks_nway_reset(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	return mii_nway_restart(&ks->mii);
}

static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_settings	= ks_get_settings,
	.set_settings	= ks_set_settings,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
};

/* MII interface controls */

/**
 * ks_phy_reg - convert MII register into a KS8851 register
 * @reg: MII register number.
 *
 * Return the KS8851 register number for the corresponding MII PHY register
 * if possible. Return zero if the MII register has no direct mapping to the
 * KS8851 register set.
 */
static int ks_phy_reg(int reg)
{
	switch (reg) {
	case MII_BMCR:
		return KS_P1MBCR;
	case MII_BMSR:
		return KS_P1MBSR;
	case MII_PHYSID1:
		return KS_PHY1ILR;
	case MII_PHYSID2:
		return KS_PHY1IHR;
	case MII_ADVERTISE:
		return KS_P1ANAR;
	case MII_LPA:
		return KS_P1ANLPR;
	}

	return 0x0;
}

/**
 * ks_phy_read - MII interface PHY register read.
 * @netdev: The network device the PHY is on.
 * @phy_addr: Address of PHY (ignored as we only have one)
 * @reg: The register to read.
 *
 * This call reads data from the PHY register specified in @reg. Since the
 * device does not support all the MII registers, the non-existent values
 * are always returned as zero.
 *
 * We return zero for unsupported registers as the MII code does not check
 * the value returned for any error status, and simply returns it to the
 * caller. The mii-tool that the driver was tested with takes any negative
 * error as real PHY capabilities, thus displaying incorrect data to the
 * user.
 */
static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;
	int result;

	ksreg = ks_phy_reg(reg);
	if (!ksreg)
		return 0x0;	/* no error return allowed, so use zero */

	mutex_lock(&ks->lock);
	result = ks_rdreg16(ks, ksreg);
	mutex_unlock(&ks->lock);

	return result;
}

static void ks_phy_write(struct net_device *netdev,
			 int phy, int reg, int value)
{
	struct ks_net *ks = netdev_priv(netdev);
	int ksreg;

	ksreg = ks_phy_reg(reg);
	if (ksreg) {
		mutex_lock(&ks->lock);
		ks_wrreg16(ks, ksreg, value);
		mutex_unlock(&ks->lock);
	}
}

/**
 * ks_read_selftest - read the selftest memory info.
 * @ks: The device state
 *
 * Read and check the TX/RX memory selftest information.
 */
static int ks_read_selftest(struct ks_net *ks)
{
	unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
	int ret = 0;
	unsigned rd;

	rd = ks_rdreg16(ks, KS_MBIR);

	if ((rd & both_done) != both_done) {
		netdev_warn(ks->netdev, "Memory selftest not finished\n");
		return 0;
	}

	if (rd & MBIR_TXMBFA) {
		netdev_err(ks->netdev, "TX memory selftest fails\n");
		ret |= 1;
	}

	if (rd & MBIR_RXMBFA) {
		netdev_err(ks->netdev, "RX memory selftest fails\n");
		ret |= 2;
	}

	netdev_info(ks->netdev, "the selftest passes\n");
	return ret;
}

static void ks_setup(struct ks_net *ks)
{
	u16 w;

	/**
	 * Configure QMU Transmit
	 */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);

	/* Setup RxQ Command Control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/**
	 * Force the duplex mode to half duplex; the default is full duplex,
	 * but if auto-negotiation fails most switches fall back to
	 * half duplex.
	 */

	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~P1MBCR_FORCE_FDX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)		/* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast)		/* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else				/* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}  /* ks_setup */


static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupts status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enables the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}  /* ks_setup_int */

static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	ks->frame_head_info = (struct type_frame_head *) \
		kmalloc(MHEADER_SIZE, GFP_KERNEL);
	if (!ks->frame_head_info) {
		pr_err("Error: Fail to allocate frame memory\n");
		return false;
	}

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}

static int __devinit ks8851_probe(struct platform_device *pdev)
{
	int err = -ENOMEM;
	struct resource *io_d, *io_c;
	struct net_device *netdev;
	struct ks_net *ks;
	u16 id, data;

	io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
		goto err_mem_region;

	if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
		goto err_mem_region1;

	netdev = alloc_etherdev(sizeof(struct ks_net));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	ks = netdev_priv(netdev);
	ks->netdev = netdev;
	ks->hw_addr = ioremap(io_d->start, resource_size(io_d));

	if (!ks->hw_addr)
		goto err_ioremap;

	ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
	if (!ks->hw_addr_cmd)
		goto err_ioremap1;

	ks->irq = platform_get_irq(pdev, 0);

	if (ks->irq < 0) {
		err = ks->irq;
		goto err_get_irq;
	}

	ks->pdev = pdev;

	mutex_init(&ks->lock);
	spin_lock_init(&ks->statelock);

	netdev->netdev_ops = &ks_netdev_ops;
	netdev->ethtool_ops = &ks_ethtool_ops;

	/* setup mii state */
	ks->mii.dev		= netdev;
	ks->mii.phy_id		= 1,
	ks->mii.phy_id_mask	= 1;
	ks->mii.reg_num_mask	= 0xf;
	ks->mii.mdio_read	= ks_phy_read;
	ks->mii.mdio_write	= ks_phy_write;

	netdev_info(netdev, "message enable is %d\n", msg_enable);
	/* set the default message enable */
	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
						     NETIF_MSG_PROBE |
						     NETIF_MSG_LINK));
	ks_read_config(ks);

	/* simple check for a valid chip being connected to the bus */
	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
		netdev_err(netdev, "failed to read device ID\n");
		err = -ENODEV;
		goto err_register;
	}

	if (ks_read_selftest(ks)) {
		netdev_err(netdev, "chip selftest failed\n");
		err = -ENODEV;
		goto err_register;
	}

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	platform_set_drvdata(pdev, netdev);

	ks_soft_reset(ks, GRR_GSR);
	ks_hw_init(ks);
	ks_disable_qmu(ks);
	ks_setup(ks);
	ks_setup_int(ks);
	memcpy(netdev->dev_addr, ks->mac_addr, 6);

	data = ks_rdreg16(ks, KS_OBCR);
	ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);

	/**
	 * If you want to use the default MAC addr,
	 * comment out the 2 functions below.
	 */

	random_ether_addr(netdev->dev_addr);
	ks_set_mac(ks, netdev->dev_addr);

	id = ks_rdreg16(ks, KS_CIDER);

	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
	return 0;

err_register:
err_get_irq:
	iounmap(ks->hw_addr_cmd);
err_ioremap1:
	iounmap(ks->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	release_mem_region(io_c->start, resource_size(io_c));
err_mem_region1:
	release_mem_region(io_d->start, resource_size(io_d));
err_mem_region:
	return err;
}

static int __devexit ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct ks_net *ks = netdev_priv(netdev);
	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	kfree(ks->frame_head_info);
	unregister_netdev(netdev);
	iounmap(ks->hw_addr);
	free_netdev(netdev);
	release_mem_region(iomem->start, resource_size(iomem));
	platform_set_drvdata(pdev, NULL);
	return 0;

}

static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
	.probe = ks8851_probe,
	.remove = __devexit_p(ks8851_remove),
};

static int __init ks8851_init(void)
{
	return platform_driver_register(&ks8851_platform_driver);
}

static void __exit ks8851_exit(void)
{
	platform_driver_unregister(&ks8851_platform_driver);
}

module_init(ks8851_init);
module_exit(ks8851_exit);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");