/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */
9 #include "qemu/osdep.h"
13 #include "qemu/module.h"
14 #include "hw/m68k/mcf.h"
15 #include "hw/m68k/mcf_fec.h"
16 #include "hw/net/mii.h"
17 #include "hw/sysbus.h"
/*
 * Debug logging: define DEBUG_FEC to get verbose traces on stdout.
 * The two definitions must be mutually exclusive — without the #ifdef
 * guard (as the file stood) DPRINTF is redefined, which is a
 * preprocessor error.
 */
#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif
/* Limits of this device model (not hardware registers): bound on how many
   descriptors one TX/RX pass will walk, the largest frame buffered, and the
   number of 32-bit MIB counter words. */
#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
#define FEC_MIB_SIZE 64
35 SysBusDevice parent_obj
;
38 qemu_irq irq
[FEC_NUM_IRQ
];
45 uint32_t rx_descriptor
;
46 uint32_t tx_descriptor
;
57 uint32_t mib
[FEC_MIB_SIZE
];
/* Event bits shared by the EIR (event/interrupt status) and EIMR (interrupt
   mask) registers.  An interrupt fires when a bit is set in both. */
#define FEC_INT_HB 0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA 0x10000000
#define FEC_INT_TXF 0x08000000
#define FEC_INT_TXB 0x04000000
#define FEC_INT_RXF 0x02000000
#define FEC_INT_RXB 0x01000000
#define FEC_INT_MII 0x00800000
#define FEC_INT_EB 0x00400000
#define FEC_INT_LC 0x00200000
#define FEC_INT_RL 0x00100000
#define FEC_INT_UN 0x00080000
77 /* Map interrupt flags onto IRQ lines. */
78 static const uint32_t mcf_fec_irq_map
[FEC_NUM_IRQ
] = {
/* Buffer Descriptor. */
/* Flag bits in the buffer descriptor 'flags' field.  FEC_BD_R (TX: ready)
   and FEC_BD_E (RX: empty) deliberately share bit 0x8000 — which meaning
   applies depends on whether the descriptor is on the TX or RX ring. */
#define FEC_BD_R 0x8000
#define FEC_BD_E 0x8000
#define FEC_BD_O1 0x4000
#define FEC_BD_W 0x2000
#define FEC_BD_O2 0x1000
#define FEC_BD_L 0x0800
#define FEC_BD_TC 0x0400
#define FEC_BD_ABC 0x0200
#define FEC_BD_M 0x0100
#define FEC_BD_BC 0x0080
#define FEC_BD_MC 0x0040
#define FEC_BD_LG 0x0020
#define FEC_BD_NO 0x0010
#define FEC_BD_CR 0x0004
#define FEC_BD_OV 0x0002
#define FEC_BD_TR 0x0001
/* Indices into the mib[] counter array.  The MMIO handlers map register
   offset 0x200 + 4*index onto mib[index] (see the 0x200...0x2e0 cases). */
#define MIB_RMON_T_DROP 0
#define MIB_RMON_T_PACKETS 1
#define MIB_RMON_T_BC_PKT 2
#define MIB_RMON_T_MC_PKT 3
#define MIB_RMON_T_CRC_ALIGN 4
#define MIB_RMON_T_UNDERSIZE 5
#define MIB_RMON_T_OVERSIZE 6
#define MIB_RMON_T_FRAG 7
#define MIB_RMON_T_JAB 8
#define MIB_RMON_T_COL 9
#define MIB_RMON_T_P64 10
#define MIB_RMON_T_P65TO127 11
#define MIB_RMON_T_P128TO255 12
#define MIB_RMON_T_P256TO511 13
#define MIB_RMON_T_P512TO1023 14
#define MIB_RMON_T_P1024TO2047 15
#define MIB_RMON_T_P_GTE2048 16
#define MIB_RMON_T_OCTETS 17
#define MIB_IEEE_T_DROP 18
#define MIB_IEEE_T_FRAME_OK 19
#define MIB_IEEE_T_1COL 20
#define MIB_IEEE_T_MCOL 21
#define MIB_IEEE_T_DEF 22
#define MIB_IEEE_T_LCOL 23
#define MIB_IEEE_T_EXCOL 24
#define MIB_IEEE_T_MACERR 25
#define MIB_IEEE_T_CSERR 26
#define MIB_IEEE_T_SQE 27
#define MIB_IEEE_T_FDXFC 28
#define MIB_IEEE_T_OCTETS_OK 29

#define MIB_RMON_R_DROP 32
#define MIB_RMON_R_PACKETS 33
#define MIB_RMON_R_BC_PKT 34
#define MIB_RMON_R_MC_PKT 35
#define MIB_RMON_R_CRC_ALIGN 36
#define MIB_RMON_R_UNDERSIZE 37
#define MIB_RMON_R_OVERSIZE 38
#define MIB_RMON_R_FRAG 39
#define MIB_RMON_R_JAB 40
#define MIB_RMON_R_RESVD_0 41
#define MIB_RMON_R_P64 42
#define MIB_RMON_R_P65TO127 43
#define MIB_RMON_R_P128TO255 44
#define MIB_RMON_R_P256TO511 45
#define MIB_RMON_R_P512TO1023 46
#define MIB_RMON_R_P1024TO2047 47
#define MIB_RMON_R_P_GTE2048 48
#define MIB_RMON_R_OCTETS 49
#define MIB_IEEE_R_DROP 50
#define MIB_IEEE_R_FRAME_OK 51
#define MIB_IEEE_R_CRC 52
#define MIB_IEEE_R_ALIGN 53
#define MIB_IEEE_R_MACERR 54
#define MIB_IEEE_R_FDXFC 55
#define MIB_IEEE_R_OCTETS_OK 56
/*
 * Fetch a buffer descriptor from guest memory at 'addr' and byte-swap
 * its fields from big-endian (guest order) to host order in place.
 */
static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}
/*
 * Store a buffer descriptor back to guest memory at 'addr'.  The swap to
 * big-endian goes through a temporary so *bd itself stays in host order.
 */
static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}
/*
 * Recompute the device's interrupt output lines.
 *
 * A source is active when its bit is set in both the event register
 * (eir) and the interrupt mask (eimr).  Each of the FEC_NUM_IRQ output
 * lines carries the OR of the sources routed to it by mcf_fec_irq_map;
 * only lines whose level actually changed since the last call are
 * re-driven, with irq_state caching the previous active set.
 */
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}
/*
 * Account a transmitted frame of 'size' bytes in the MIB counters:
 * total packets/octets, the RMON size-bucket histogram, and the IEEE
 * "OK" counters (this model never fails a transmit).
 */
static void mcf_fec_tx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_T_PACKETS]++;
    s->mib[MIB_RMON_T_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_T_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_T_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_T_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_T_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_T_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_T_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_T_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_T_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_T_FRAME_OK]++;
    s->mib[MIB_IEEE_T_OCTETS_OK] += size;
}
/*
 * Transmit all pending frames from the TX descriptor ring.
 *
 * Buffers are gathered into 'frame' until a descriptor marked
 * FEC_BD_L (last-in-frame) is seen, then the assembled frame is handed
 * to the net layer and the per-frame TX stats updated.  The walk is
 * bounded by FEC_MAX_DESC so a guest that builds a looping descriptor
 * chain cannot hang QEMU.  A frame exceeding FEC_MAX_FRAME_SIZE is
 * truncated and the babbling-transmit event (FEC_INT_BABT) raised.
 * Each consumed descriptor has its ready bit cleared and is written
 * back; the final ring position is saved in tx_descriptor.
 */
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame. */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            mcf_fec_tx_stats(s, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor. */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            /* Wrap bit: restart from the ring base. */
            addr = s->etdsr;
        } else {
            addr += sizeof(bd);
        }
    }
    s->tx_descriptor = addr;
}
/*
 * Latch the receiver-enable state from the current RX descriptor:
 * reception is possible only while the descriptor at rx_descriptor is
 * empty (FEC_BD_E set).  When receiving becomes possible, kick the net
 * layer so packets queued while we were full get delivered.
 */
static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}
/*
 * qdev reset handler: return the controller to its power-on register
 * state.  NOTE(review): the register defaults below were reconstructed
 * from the body that is not visible in this chunk — confirm against the
 * MCF5208 FEC reset values before relying on them.
 */
static void mcf_fec_reset(DeviceState *dev)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}
/* Fields of the MMFR (MII management frame) register: operation code,
   PHY address and register number packed into one 32-bit write. */
#define MMFR_WRITE_OP (1 << 28)
#define MMFR_READ_OP (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v) (((v) >> 18) & 0x1f)
/*
 * Emulate an MDIO read through the MMFR register for the single
 * hard-wired PHY at address 1.  Write operations echo the register
 * back; reads of the known PHY registers return a canned
 * 100 Mbit full-duplex, autoneg-complete, link-up PHY.  The result is
 * merged into the low 16 bits of mmfr, which the guest then reads.
 * NOTE(review): the case labels and the PHY-ID cases were reconstructed
 * from the value lines visible in this chunk — confirm against hw/net/mii.h.
 */
static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP)
        return s->mmfr;
    if (MMFR_PHYADDR(s->mmfr) != 1)
        return s->mmfr |= 0xffff;

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}
/*
 * MMIO read handler for the 0x400-byte FEC register window.
 * Offsets follow the ColdFire FEC register map; registers this model
 * does not implement read back as fixed constants or zero, and the
 * 0x200-0x2e0 range aliases the MIB counter array.
 */
static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;   /* EIR: event/interrupt status */
    case 0x008: return s->eimr;  /* EIMR: interrupt mask */
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;   /* ECR: ethernet control */
    case 0x040: return mcf_fec_read_mdio(s); /* MMFR */
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr; /* RX ring base */
    case 0x184: return s->etdsr; /* TX ring base */
    case 0x188: return s->emrbr; /* max RX buffer size */
    case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4];
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}
/*
 * MMIO write handler for the FEC register window.  Side effects:
 * writing RDAR/TDAR kicks the RX/TX engines, writing ECR with
 * FEC_RESET resets the whole device, and writing the ring-base
 * registers also rewinds the corresponding descriptor pointer.
 * NOTE(review): several case labels and break statements were
 * reconstructed from the statement lines visible in this chunk —
 * confirm against the upstream file.
 */
static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;  /* write-one-to-clear event bits */
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024: /* ECR */
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(opaque);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040: /* MMFR: start an MDIO transaction, flag completion */
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB. */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately. */
        s->tcr = value;
        if (value & 1) {
            s->eir |= FEC_INT_GRA;
        }
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering. */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored. */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        /* A zero write selects the maximum RX buffer size. */
        s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
        break;
    case 0x200 ... 0x2e0:
        s->mib[(addr & 0x1ff) / 4] = value;
        break;
    default:
        hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}
/*
 * Account a received frame of 'size' bytes in the MIB counters:
 * total packets/octets, the RMON size-bucket histogram, and the IEEE
 * "OK" counters (mirror of mcf_fec_tx_stats for the RX direction).
 */
static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_R_PACKETS]++;
    s->mib[MIB_RMON_R_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_R_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_R_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_R_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_R_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_R_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_R_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_R_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_R_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_R_FRAME_OK]++;
    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
}
/*
 * Return non-zero when the RX descriptor ring has enough consecutive
 * empty descriptors (each holding at most emrbr bytes) to absorb a
 * frame of 'want' bytes, starting from the current rx_descriptor.
 * Returns 0 as soon as a non-empty descriptor is found.
 */
static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += sizeof(bd);
        }
    }
    return 0;
}
550 static ssize_t
mcf_fec_receive(NetClientState
*nc
, const uint8_t *buf
, size_t size
)
552 mcf_fec_state
*s
= qemu_get_nic_opaque(nc
);
559 unsigned int buf_len
;
562 DPRINTF("do_rx len %d\n", size
);
563 if (!s
->rx_enabled
) {
566 /* 4 bytes for the CRC. */
568 crc
= cpu_to_be32(crc32(~0, buf
, size
));
569 crc_ptr
= (uint8_t *)&crc
;
570 /* Huge frames are truncted. */
571 if (size
> FEC_MAX_FRAME_SIZE
) {
572 size
= FEC_MAX_FRAME_SIZE
;
573 flags
|= FEC_BD_TR
| FEC_BD_LG
;
575 /* Frames larger than the user limit just set error flags. */
576 if (size
> (s
->rcr
>> 16)) {
579 /* Check if we have enough space in current descriptors */
580 if (!mcf_fec_have_receive_space(s
, size
)) {
583 addr
= s
->rx_descriptor
;
586 mcf_fec_read_bd(&bd
, addr
);
587 buf_len
= (size
<= s
->emrbr
) ? size
: s
->emrbr
;
590 DPRINTF("rx_bd %x length %d\n", addr
, bd
.length
);
591 /* The last 4 bytes are the CRC. */
595 cpu_physical_memory_write(buf_addr
, buf
, buf_len
);
598 cpu_physical_memory_write(buf_addr
+ buf_len
, crc_ptr
, 4 - size
);
601 bd
.flags
&= ~FEC_BD_E
;
603 /* Last buffer in frame. */
604 bd
.flags
|= flags
| FEC_BD_L
;
605 DPRINTF("rx frame flags %04x\n", bd
.flags
);
606 s
->eir
|= FEC_INT_RXF
;
608 s
->eir
|= FEC_INT_RXB
;
610 mcf_fec_write_bd(&bd
, addr
);
611 /* Advance to the next descriptor. */
612 if ((bd
.flags
& FEC_BD_W
) != 0) {
618 s
->rx_descriptor
= addr
;
619 mcf_fec_rx_stats(s
, retsize
);
620 mcf_fec_enable_rx(s
);
/* MMIO dispatch table for the FEC register window. */
static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Glue between the QEMU net layer and this device model. */
static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};
/*
 * qdev realize: create the NIC backend for this FEC instance and
 * publish its MAC address in the monitor's network info.
 */
static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/*
 * Instance init: map the 0x400-byte register window and export the
 * FEC_NUM_IRQ interrupt lines through the sysbus interface.
 */
static void mcf_fec_instance_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    mcf_fec_state *s = MCF_FEC_NET(obj);
    int i;

    memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
}
/* qdev properties: standard NIC configuration (MAC address, netdev). */
static Property mcf_fec_properties[] = {
    DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
    DEFINE_PROP_END_OF_LIST(),
};
/* Class init: hook up realize/reset/properties and categorize the device. */
static void mcf_fec_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->realize = mcf_fec_realize;
    dc->desc = "MCF Fast Ethernet Controller network device";
    dc->reset = mcf_fec_reset;
    dc->props = mcf_fec_properties;
}
/* QOM type registration data for the FEC sysbus device. */
static const TypeInfo mcf_fec_info = {
    .name = TYPE_MCF_FEC_NET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(mcf_fec_state),
    .instance_init = mcf_fec_instance_init,
    .class_init = mcf_fec_class_init,
};
/* Register the device type with QOM at startup. */
static void mcf_fec_register_types(void)
{
    type_register_static(&mcf_fec_info);
}

type_init(mcf_fec_register_types)