2 * i.MX Fast Ethernet Controller emulation.
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
6 * Based on Coldfire Fast Ethernet Controller emulation.
8 * Copyright (c) 2007 CodeSourcery.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
24 #include "qemu/osdep.h"
26 #include "hw/net/imx_fec.h"
27 #include "hw/qdev-properties.h"
28 #include "migration/vmstate.h"
29 #include "sysemu/dma.h"
31 #include "qemu/module.h"
32 #include "net/checksum.h"
39 #define DEBUG_IMX_FEC 0
/* Conditional debug printf for the MAC model; compiled out when DEBUG_IMX_FEC is 0. */
#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                                             __func__, ##args); \
        } \
    } while (0)
51 #define DEBUG_IMX_PHY 0
/* Conditional debug printf for the PHY model; compiled out when DEBUG_IMX_PHY is 0. */
#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                                                 __func__, ##args); \
        } \
    } while (0)
62 #define IMX_MAX_DESC 1024
64 static const char *imx_default_reg_name(IMXFECState
*s
, uint32_t index
)
67 sprintf(tmp
, "index %d", index
);
71 static const char *imx_fec_reg_name(IMXFECState
*s
, uint32_t index
)
78 case ENET_MIIGSK_CFGR
:
83 return imx_default_reg_name(s
, index
);
87 static const char *imx_enet_reg_name(IMXFECState
*s
, uint32_t index
)
145 return imx_default_reg_name(s
, index
);
149 static const char *imx_eth_reg_name(IMXFECState
*s
, uint32_t index
)
196 return imx_fec_reg_name(s
, index
);
198 return imx_enet_reg_name(s
, index
);
204 * Versions of this device with more than one TX descriptor save the
205 * 2nd and 3rd descriptors in a subsection, to maintain migration
206 * compatibility with previous versions of the device that only
207 * supported a single descriptor.
209 static bool imx_eth_is_multi_tx_ring(void *opaque
)
211 IMXFECState
*s
= IMX_FEC(opaque
);
213 return s
->tx_ring_num
> 1;
216 static const VMStateDescription vmstate_imx_eth_txdescs
= {
217 .name
= "imx.fec/txdescs",
219 .minimum_version_id
= 1,
220 .needed
= imx_eth_is_multi_tx_ring
,
221 .fields
= (VMStateField
[]) {
222 VMSTATE_UINT32(tx_descriptor
[1], IMXFECState
),
223 VMSTATE_UINT32(tx_descriptor
[2], IMXFECState
),
224 VMSTATE_END_OF_LIST()
228 static const VMStateDescription vmstate_imx_eth
= {
229 .name
= TYPE_IMX_FEC
,
231 .minimum_version_id
= 2,
232 .fields
= (VMStateField
[]) {
233 VMSTATE_UINT32_ARRAY(regs
, IMXFECState
, ENET_MAX
),
234 VMSTATE_UINT32(rx_descriptor
, IMXFECState
),
235 VMSTATE_UINT32(tx_descriptor
[0], IMXFECState
),
236 VMSTATE_UINT32(phy_status
, IMXFECState
),
237 VMSTATE_UINT32(phy_control
, IMXFECState
),
238 VMSTATE_UINT32(phy_advertise
, IMXFECState
),
239 VMSTATE_UINT32(phy_int
, IMXFECState
),
240 VMSTATE_UINT32(phy_int_mask
, IMXFECState
),
241 VMSTATE_END_OF_LIST()
243 .subsections
= (const VMStateDescription
* []) {
244 &vmstate_imx_eth_txdescs
,
249 #define PHY_INT_ENERGYON (1 << 7)
250 #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
251 #define PHY_INT_FAULT (1 << 5)
252 #define PHY_INT_DOWN (1 << 4)
253 #define PHY_INT_AUTONEG_LP (1 << 3)
254 #define PHY_INT_PARFAULT (1 << 2)
255 #define PHY_INT_AUTONEG_PAGE (1 << 1)
257 static void imx_eth_update(IMXFECState
*s
);
260 * The MII phy could raise a GPIO to the processor which in turn
261 * could be handled as an interrupt by the OS.
262 * For now we don't handle any GPIO/interrupt line, so the OS will
263 * have to poll for the PHY status.
265 static void phy_update_irq(IMXFECState
*s
)
270 static void phy_update_link(IMXFECState
*s
)
272 /* Autonegotiation status mirrors link status. */
273 if (qemu_get_queue(s
->nic
)->link_down
) {
274 PHY_PRINTF("link is down\n");
275 s
->phy_status
&= ~0x0024;
276 s
->phy_int
|= PHY_INT_DOWN
;
278 PHY_PRINTF("link is up\n");
279 s
->phy_status
|= 0x0024;
280 s
->phy_int
|= PHY_INT_ENERGYON
;
281 s
->phy_int
|= PHY_INT_AUTONEG_COMPLETE
;
286 static void imx_eth_set_link(NetClientState
*nc
)
288 phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc
)));
291 static void phy_reset(IMXFECState
*s
)
293 s
->phy_status
= 0x7809;
294 s
->phy_control
= 0x3000;
295 s
->phy_advertise
= 0x01e1;
301 static uint32_t do_phy_read(IMXFECState
*s
, int reg
)
306 /* we only advertise one phy */
311 case 0: /* Basic Control */
312 val
= s
->phy_control
;
314 case 1: /* Basic Status */
323 case 4: /* Auto-neg advertisement */
324 val
= s
->phy_advertise
;
326 case 5: /* Auto-neg Link Partner Ability */
329 case 6: /* Auto-neg Expansion */
332 case 29: /* Interrupt source. */
337 case 30: /* Interrupt mask */
338 val
= s
->phy_int_mask
;
344 qemu_log_mask(LOG_UNIMP
, "[%s.phy]%s: reg %d not implemented\n",
345 TYPE_IMX_FEC
, __func__
, reg
);
349 qemu_log_mask(LOG_GUEST_ERROR
, "[%s.phy]%s: Bad address at offset %d\n",
350 TYPE_IMX_FEC
, __func__
, reg
);
355 PHY_PRINTF("read 0x%04x @ %d\n", val
, reg
);
360 static void do_phy_write(IMXFECState
*s
, int reg
, uint32_t val
)
362 PHY_PRINTF("write 0x%04x @ %d\n", val
, reg
);
365 /* we only advertise one phy */
370 case 0: /* Basic Control */
374 s
->phy_control
= val
& 0x7980;
375 /* Complete autonegotiation immediately. */
377 s
->phy_status
|= 0x0020;
381 case 4: /* Auto-neg advertisement */
382 s
->phy_advertise
= (val
& 0x2d7f) | 0x80;
384 case 30: /* Interrupt mask */
385 s
->phy_int_mask
= val
& 0xff;
392 qemu_log_mask(LOG_UNIMP
, "[%s.phy)%s: reg %d not implemented\n",
393 TYPE_IMX_FEC
, __func__
, reg
);
396 qemu_log_mask(LOG_GUEST_ERROR
, "[%s.phy]%s: Bad address at offset %d\n",
397 TYPE_IMX_FEC
, __func__
, reg
);
402 static void imx_fec_read_bd(IMXFECBufDesc
*bd
, dma_addr_t addr
)
404 dma_memory_read(&address_space_memory
, addr
, bd
, sizeof(*bd
));
407 static void imx_fec_write_bd(IMXFECBufDesc
*bd
, dma_addr_t addr
)
409 dma_memory_write(&address_space_memory
, addr
, bd
, sizeof(*bd
));
412 static void imx_enet_read_bd(IMXENETBufDesc
*bd
, dma_addr_t addr
)
414 dma_memory_read(&address_space_memory
, addr
, bd
, sizeof(*bd
));
417 static void imx_enet_write_bd(IMXENETBufDesc
*bd
, dma_addr_t addr
)
419 dma_memory_write(&address_space_memory
, addr
, bd
, sizeof(*bd
));
422 static void imx_eth_update(IMXFECState
*s
)
425 * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
426 * interrupts swapped. This worked with older versions of Linux (4.14
427 * and older) since Linux associated both interrupt lines with Ethernet
428 * MAC interrupts. Specifically,
429 * - Linux 4.15 and later have separate interrupt handlers for the MAC and
430 * timer interrupts. Those versions of Linux fail with versions of QEMU
431 * with swapped interrupt assignments.
432 * - In linux 4.14, both interrupt lines were registered with the Ethernet
433 * MAC interrupt handler. As a result, all versions of qemu happen to
434 * work, though that is accidental.
435 * - In Linux 4.9 and older, the timer interrupt was registered directly
436 * with the Ethernet MAC interrupt handler. The MAC interrupt was
437 * redirected to a GPIO interrupt to work around erratum ERR006687.
438 * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
439 * interrupt never fired since IOMUX is currently not supported in qemu.
440 * Linux instead received MAC interrupts on the timer interrupt.
441 * As a result, qemu versions with the swapped interrupt assignment work,
442 * albeit accidentally, but qemu versions with the correct interrupt
445 * To ensure that all versions of Linux work, generate ENET_INT_MAC
446 * interrrupts on both interrupt lines. This should be changed if and when
447 * qemu supports IOMUX.
449 if (s
->regs
[ENET_EIR
] & s
->regs
[ENET_EIMR
] &
450 (ENET_INT_MAC
| ENET_INT_TS_TIMER
)) {
451 qemu_set_irq(s
->irq
[1], 1);
453 qemu_set_irq(s
->irq
[1], 0);
456 if (s
->regs
[ENET_EIR
] & s
->regs
[ENET_EIMR
] & ENET_INT_MAC
) {
457 qemu_set_irq(s
->irq
[0], 1);
459 qemu_set_irq(s
->irq
[0], 0);
463 static void imx_fec_do_tx(IMXFECState
*s
)
465 int frame_size
= 0, descnt
= 0;
466 uint8_t *ptr
= s
->frame
;
467 uint32_t addr
= s
->tx_descriptor
[0];
469 while (descnt
++ < IMX_MAX_DESC
) {
473 imx_fec_read_bd(&bd
, addr
);
474 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
475 addr
, bd
.flags
, bd
.length
, bd
.data
);
476 if ((bd
.flags
& ENET_BD_R
) == 0) {
477 /* Run out of descriptors to transmit. */
478 FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
482 if (frame_size
+ len
> ENET_MAX_FRAME_SIZE
) {
483 len
= ENET_MAX_FRAME_SIZE
- frame_size
;
484 s
->regs
[ENET_EIR
] |= ENET_INT_BABT
;
486 dma_memory_read(&address_space_memory
, bd
.data
, ptr
, len
);
489 if (bd
.flags
& ENET_BD_L
) {
490 /* Last buffer in frame. */
491 qemu_send_packet(qemu_get_queue(s
->nic
), s
->frame
, frame_size
);
494 s
->regs
[ENET_EIR
] |= ENET_INT_TXF
;
496 s
->regs
[ENET_EIR
] |= ENET_INT_TXB
;
497 bd
.flags
&= ~ENET_BD_R
;
498 /* Write back the modified descriptor. */
499 imx_fec_write_bd(&bd
, addr
);
500 /* Advance to the next descriptor. */
501 if ((bd
.flags
& ENET_BD_W
) != 0) {
502 addr
= s
->regs
[ENET_TDSR
];
508 s
->tx_descriptor
[0] = addr
;
513 static void imx_enet_do_tx(IMXFECState
*s
, uint32_t index
)
515 int frame_size
= 0, descnt
= 0;
517 uint8_t *ptr
= s
->frame
;
518 uint32_t addr
, int_txb
, int_txf
, tdsr
;
524 int_txb
= ENET_INT_TXB
;
525 int_txf
= ENET_INT_TXF
;
530 int_txb
= ENET_INT_TXB1
;
531 int_txf
= ENET_INT_TXF1
;
536 int_txb
= ENET_INT_TXB2
;
537 int_txf
= ENET_INT_TXF2
;
541 qemu_log_mask(LOG_GUEST_ERROR
,
542 "%s: bogus value for index %x\n",
548 addr
= s
->tx_descriptor
[ring
];
550 while (descnt
++ < IMX_MAX_DESC
) {
554 imx_enet_read_bd(&bd
, addr
);
555 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
556 "status %04x\n", addr
, bd
.flags
, bd
.length
, bd
.data
,
557 bd
.option
, bd
.status
);
558 if ((bd
.flags
& ENET_BD_R
) == 0) {
559 /* Run out of descriptors to transmit. */
563 if (frame_size
+ len
> ENET_MAX_FRAME_SIZE
) {
564 len
= ENET_MAX_FRAME_SIZE
- frame_size
;
565 s
->regs
[ENET_EIR
] |= ENET_INT_BABT
;
567 dma_memory_read(&address_space_memory
, bd
.data
, ptr
, len
);
570 if (bd
.flags
& ENET_BD_L
) {
571 if (bd
.option
& ENET_BD_PINS
) {
572 struct ip_header
*ip_hd
= PKT_GET_IP_HDR(s
->frame
);
573 if (IP_HEADER_VERSION(ip_hd
) == 4) {
574 net_checksum_calculate(s
->frame
, frame_size
);
577 if (bd
.option
& ENET_BD_IINS
) {
578 struct ip_header
*ip_hd
= PKT_GET_IP_HDR(s
->frame
);
579 /* We compute checksum only for IPv4 frames */
580 if (IP_HEADER_VERSION(ip_hd
) == 4) {
583 csum
= net_raw_checksum((uint8_t *)ip_hd
, sizeof(*ip_hd
));
584 ip_hd
->ip_sum
= cpu_to_be16(csum
);
587 /* Last buffer in frame. */
589 qemu_send_packet(qemu_get_queue(s
->nic
), s
->frame
, frame_size
);
593 if (bd
.option
& ENET_BD_TX_INT
) {
594 s
->regs
[ENET_EIR
] |= int_txf
;
597 if (bd
.option
& ENET_BD_TX_INT
) {
598 s
->regs
[ENET_EIR
] |= int_txb
;
600 bd
.flags
&= ~ENET_BD_R
;
601 /* Write back the modified descriptor. */
602 imx_enet_write_bd(&bd
, addr
);
603 /* Advance to the next descriptor. */
604 if ((bd
.flags
& ENET_BD_W
) != 0) {
605 addr
= s
->regs
[tdsr
];
611 s
->tx_descriptor
[ring
] = addr
;
616 static void imx_eth_do_tx(IMXFECState
*s
, uint32_t index
)
618 if (!s
->is_fec
&& (s
->regs
[ENET_ECR
] & ENET_ECR_EN1588
)) {
619 imx_enet_do_tx(s
, index
);
625 static void imx_eth_enable_rx(IMXFECState
*s
, bool flush
)
629 imx_fec_read_bd(&bd
, s
->rx_descriptor
);
631 s
->regs
[ENET_RDAR
] = (bd
.flags
& ENET_BD_E
) ? ENET_RDAR_RDAR
: 0;
633 if (!s
->regs
[ENET_RDAR
]) {
634 FEC_PRINTF("RX buffer full\n");
636 qemu_flush_queued_packets(qemu_get_queue(s
->nic
));
640 static void imx_eth_reset(DeviceState
*d
)
642 IMXFECState
*s
= IMX_FEC(d
);
644 /* Reset the Device */
645 memset(s
->regs
, 0, sizeof(s
->regs
));
646 s
->regs
[ENET_ECR
] = 0xf0000000;
647 s
->regs
[ENET_MIBC
] = 0xc0000000;
648 s
->regs
[ENET_RCR
] = 0x05ee0001;
649 s
->regs
[ENET_OPD
] = 0x00010000;
651 s
->regs
[ENET_PALR
] = (s
->conf
.macaddr
.a
[0] << 24)
652 | (s
->conf
.macaddr
.a
[1] << 16)
653 | (s
->conf
.macaddr
.a
[2] << 8)
654 | s
->conf
.macaddr
.a
[3];
655 s
->regs
[ENET_PAUR
] = (s
->conf
.macaddr
.a
[4] << 24)
656 | (s
->conf
.macaddr
.a
[5] << 16)
660 s
->regs
[ENET_FRBR
] = 0x00000600;
661 s
->regs
[ENET_FRSR
] = 0x00000500;
662 s
->regs
[ENET_MIIGSK_ENR
] = 0x00000006;
664 s
->regs
[ENET_RAEM
] = 0x00000004;
665 s
->regs
[ENET_RAFL
] = 0x00000004;
666 s
->regs
[ENET_TAEM
] = 0x00000004;
667 s
->regs
[ENET_TAFL
] = 0x00000008;
668 s
->regs
[ENET_TIPG
] = 0x0000000c;
669 s
->regs
[ENET_FTRL
] = 0x000007ff;
670 s
->regs
[ENET_ATPER
] = 0x3b9aca00;
673 s
->rx_descriptor
= 0;
674 memset(s
->tx_descriptor
, 0, sizeof(s
->tx_descriptor
));
676 /* We also reset the PHY */
680 static uint32_t imx_default_read(IMXFECState
*s
, uint32_t index
)
682 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Bad register at offset 0x%"
683 PRIx32
"\n", TYPE_IMX_FEC
, __func__
, index
* 4);
687 static uint32_t imx_fec_read(IMXFECState
*s
, uint32_t index
)
692 case ENET_MIIGSK_CFGR
:
693 case ENET_MIIGSK_ENR
:
694 return s
->regs
[index
];
696 return imx_default_read(s
, index
);
700 static uint32_t imx_enet_read(IMXFECState
*s
, uint32_t index
)
730 return s
->regs
[index
];
732 return imx_default_read(s
, index
);
736 static uint64_t imx_eth_read(void *opaque
, hwaddr offset
, unsigned size
)
739 IMXFECState
*s
= IMX_FEC(opaque
);
740 uint32_t index
= offset
>> 2;
764 value
= s
->regs
[index
];
768 value
= imx_fec_read(s
, index
);
770 value
= imx_enet_read(s
, index
);
775 FEC_PRINTF("reg[%s] => 0x%" PRIx32
"\n", imx_eth_reg_name(s
, index
),
781 static void imx_default_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
783 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Bad address at offset 0x%"
784 PRIx32
"\n", TYPE_IMX_FEC
, __func__
, index
* 4);
788 static void imx_fec_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
792 /* FRBR is read only */
793 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Register FRBR is read only\n",
794 TYPE_IMX_FEC
, __func__
);
797 s
->regs
[index
] = (value
& 0x000003fc) | 0x00000400;
799 case ENET_MIIGSK_CFGR
:
800 s
->regs
[index
] = value
& 0x00000053;
802 case ENET_MIIGSK_ENR
:
803 s
->regs
[index
] = (value
& 0x00000002) ? 0x00000006 : 0;
806 imx_default_write(s
, index
, value
);
811 static void imx_enet_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
821 s
->regs
[index
] = value
& 0x000001ff;
824 s
->regs
[index
] = value
& 0x0000001f;
827 s
->regs
[index
] = value
& 0x00003fff;
830 s
->regs
[index
] = value
& 0x00000019;
833 s
->regs
[index
] = value
& 0x000000C7;
836 s
->regs
[index
] = value
& 0x00002a9d;
841 s
->regs
[index
] = value
;
844 /* ATSTMP is read only */
845 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Register ATSTMP is read only\n",
846 TYPE_IMX_FEC
, __func__
);
849 s
->regs
[index
] = value
& 0x7fffffff;
852 s
->regs
[index
] = value
& 0x00007f7f;
855 /* implement clear timer flag */
856 value
= value
& 0x0000000f;
862 value
= value
& 0x000000fd;
868 s
->regs
[index
] = value
;
871 imx_default_write(s
, index
, value
);
876 static void imx_eth_write(void *opaque
, hwaddr offset
, uint64_t value
,
879 IMXFECState
*s
= IMX_FEC(opaque
);
880 const bool single_tx_ring
= !imx_eth_is_multi_tx_ring(s
);
881 uint32_t index
= offset
>> 2;
883 FEC_PRINTF("reg[%s] <= 0x%" PRIx32
"\n", imx_eth_reg_name(s
, index
),
888 s
->regs
[index
] &= ~value
;
891 s
->regs
[index
] = value
;
894 if (s
->regs
[ENET_ECR
] & ENET_ECR_ETHEREN
) {
895 if (!s
->regs
[index
]) {
896 imx_eth_enable_rx(s
, true);
902 case ENET_TDAR1
: /* FALLTHROUGH */
903 case ENET_TDAR2
: /* FALLTHROUGH */
904 if (unlikely(single_tx_ring
)) {
905 qemu_log_mask(LOG_GUEST_ERROR
,
906 "[%s]%s: trying to access TDAR2 or TDAR1\n",
907 TYPE_IMX_FEC
, __func__
);
910 case ENET_TDAR
: /* FALLTHROUGH */
911 if (s
->regs
[ENET_ECR
] & ENET_ECR_ETHEREN
) {
912 s
->regs
[index
] = ENET_TDAR_TDAR
;
913 imx_eth_do_tx(s
, index
);
918 if (value
& ENET_ECR_RESET
) {
919 return imx_eth_reset(DEVICE(s
));
921 s
->regs
[index
] = value
;
922 if ((s
->regs
[index
] & ENET_ECR_ETHEREN
) == 0) {
923 s
->regs
[ENET_RDAR
] = 0;
924 s
->rx_descriptor
= s
->regs
[ENET_RDSR
];
925 s
->regs
[ENET_TDAR
] = 0;
926 s
->regs
[ENET_TDAR1
] = 0;
927 s
->regs
[ENET_TDAR2
] = 0;
928 s
->tx_descriptor
[0] = s
->regs
[ENET_TDSR
];
929 s
->tx_descriptor
[1] = s
->regs
[ENET_TDSR1
];
930 s
->tx_descriptor
[2] = s
->regs
[ENET_TDSR2
];
934 s
->regs
[index
] = value
;
935 if (extract32(value
, 29, 1)) {
936 /* This is a read operation */
937 s
->regs
[ENET_MMFR
] = deposit32(s
->regs
[ENET_MMFR
], 0, 16,
942 /* This a write operation */
943 do_phy_write(s
, extract32(value
, 18, 10), extract32(value
, 0, 16));
945 /* raise the interrupt as the PHY operation is done */
946 s
->regs
[ENET_EIR
] |= ENET_INT_MII
;
949 s
->regs
[index
] = value
& 0xfe;
952 /* TODO: Implement MIB. */
953 s
->regs
[index
] = (value
& 0x80000000) ? 0xc0000000 : 0;
956 s
->regs
[index
] = value
& 0x07ff003f;
957 /* TODO: Implement LOOP mode. */
960 /* We transmit immediately, so raise GRA immediately. */
961 s
->regs
[index
] = value
;
963 s
->regs
[ENET_EIR
] |= ENET_INT_GRA
;
967 s
->regs
[index
] = value
;
968 s
->conf
.macaddr
.a
[0] = value
>> 24;
969 s
->conf
.macaddr
.a
[1] = value
>> 16;
970 s
->conf
.macaddr
.a
[2] = value
>> 8;
971 s
->conf
.macaddr
.a
[3] = value
;
974 s
->regs
[index
] = (value
| 0x0000ffff) & 0xffff8808;
975 s
->conf
.macaddr
.a
[4] = value
>> 24;
976 s
->conf
.macaddr
.a
[5] = value
>> 16;
979 s
->regs
[index
] = (value
& 0x0000ffff) | 0x00010000;
985 /* TODO: implement MAC hash filtering. */
989 s
->regs
[index
] = value
& 0x3;
991 s
->regs
[index
] = value
& 0x13f;
996 s
->regs
[index
] = value
& ~3;
998 s
->regs
[index
] = value
& ~7;
1000 s
->rx_descriptor
= s
->regs
[index
];
1004 s
->regs
[index
] = value
& ~3;
1006 s
->regs
[index
] = value
& ~7;
1008 s
->tx_descriptor
[0] = s
->regs
[index
];
1011 if (unlikely(single_tx_ring
)) {
1012 qemu_log_mask(LOG_GUEST_ERROR
,
1013 "[%s]%s: trying to access TDSR1\n",
1014 TYPE_IMX_FEC
, __func__
);
1018 s
->regs
[index
] = value
& ~7;
1019 s
->tx_descriptor
[1] = s
->regs
[index
];
1022 if (unlikely(single_tx_ring
)) {
1023 qemu_log_mask(LOG_GUEST_ERROR
,
1024 "[%s]%s: trying to access TDSR2\n",
1025 TYPE_IMX_FEC
, __func__
);
1029 s
->regs
[index
] = value
& ~7;
1030 s
->tx_descriptor
[2] = s
->regs
[index
];
1033 s
->regs
[index
] = value
& 0x00003ff0;
1037 imx_fec_write(s
, index
, value
);
1039 imx_enet_write(s
, index
, value
);
1047 static int imx_eth_can_receive(NetClientState
*nc
)
1049 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1053 return !!s
->regs
[ENET_RDAR
];
1056 static ssize_t
imx_fec_receive(NetClientState
*nc
, const uint8_t *buf
,
1059 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1066 unsigned int buf_len
;
1069 FEC_PRINTF("len %d\n", (int)size
);
1071 if (!s
->regs
[ENET_RDAR
]) {
1072 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Unexpected packet\n",
1073 TYPE_IMX_FEC
, __func__
);
1077 /* 4 bytes for the CRC. */
1079 crc
= cpu_to_be32(crc32(~0, buf
, size
));
1080 crc_ptr
= (uint8_t *) &crc
;
1082 /* Huge frames are truncated. */
1083 if (size
> ENET_MAX_FRAME_SIZE
) {
1084 size
= ENET_MAX_FRAME_SIZE
;
1085 flags
|= ENET_BD_TR
| ENET_BD_LG
;
1088 /* Frames larger than the user limit just set error flags. */
1089 if (size
> (s
->regs
[ENET_RCR
] >> 16)) {
1090 flags
|= ENET_BD_LG
;
1093 addr
= s
->rx_descriptor
;
1095 imx_fec_read_bd(&bd
, addr
);
1096 if ((bd
.flags
& ENET_BD_E
) == 0) {
1097 /* No descriptors available. Bail out. */
1099 * FIXME: This is wrong. We should probably either
1100 * save the remainder for when more RX buffers are
1101 * available, or flag an error.
1103 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Lost end of frame\n",
1104 TYPE_IMX_FEC
, __func__
);
1107 buf_len
= (size
<= s
->regs
[ENET_MRBR
]) ? size
: s
->regs
[ENET_MRBR
];
1108 bd
.length
= buf_len
;
1111 FEC_PRINTF("rx_bd 0x%x length %d\n", addr
, bd
.length
);
1113 /* The last 4 bytes are the CRC. */
1115 buf_len
+= size
- 4;
1118 dma_memory_write(&address_space_memory
, buf_addr
, buf
, buf_len
);
1121 dma_memory_write(&address_space_memory
, buf_addr
+ buf_len
,
1123 crc_ptr
+= 4 - size
;
1125 bd
.flags
&= ~ENET_BD_E
;
1127 /* Last buffer in frame. */
1128 bd
.flags
|= flags
| ENET_BD_L
;
1129 FEC_PRINTF("rx frame flags %04x\n", bd
.flags
);
1130 s
->regs
[ENET_EIR
] |= ENET_INT_RXF
;
1132 s
->regs
[ENET_EIR
] |= ENET_INT_RXB
;
1134 imx_fec_write_bd(&bd
, addr
);
1135 /* Advance to the next descriptor. */
1136 if ((bd
.flags
& ENET_BD_W
) != 0) {
1137 addr
= s
->regs
[ENET_RDSR
];
1142 s
->rx_descriptor
= addr
;
1143 imx_eth_enable_rx(s
, false);
1148 static ssize_t
imx_enet_receive(NetClientState
*nc
, const uint8_t *buf
,
1151 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1158 unsigned int buf_len
;
1160 bool shift16
= s
->regs
[ENET_RACC
] & ENET_RACC_SHIFT16
;
1162 FEC_PRINTF("len %d\n", (int)size
);
1164 if (!s
->regs
[ENET_RDAR
]) {
1165 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Unexpected packet\n",
1166 TYPE_IMX_FEC
, __func__
);
1170 /* 4 bytes for the CRC. */
1172 crc
= cpu_to_be32(crc32(~0, buf
, size
));
1173 crc_ptr
= (uint8_t *) &crc
;
1179 /* Huge frames are truncated. */
1180 if (size
> s
->regs
[ENET_FTRL
]) {
1181 size
= s
->regs
[ENET_FTRL
];
1182 flags
|= ENET_BD_TR
| ENET_BD_LG
;
1185 /* Frames larger than the user limit just set error flags. */
1186 if (size
> (s
->regs
[ENET_RCR
] >> 16)) {
1187 flags
|= ENET_BD_LG
;
1190 addr
= s
->rx_descriptor
;
1192 imx_enet_read_bd(&bd
, addr
);
1193 if ((bd
.flags
& ENET_BD_E
) == 0) {
1194 /* No descriptors available. Bail out. */
1196 * FIXME: This is wrong. We should probably either
1197 * save the remainder for when more RX buffers are
1198 * available, or flag an error.
1200 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Lost end of frame\n",
1201 TYPE_IMX_FEC
, __func__
);
1204 buf_len
= MIN(size
, s
->regs
[ENET_MRBR
]);
1205 bd
.length
= buf_len
;
1208 FEC_PRINTF("rx_bd 0x%x length %d\n", addr
, bd
.length
);
1210 /* The last 4 bytes are the CRC. */
1212 buf_len
+= size
- 4;
1218 * If SHIFT16 bit of ENETx_RACC register is set we need to
1219 * align the payload to 4-byte boundary.
1221 const uint8_t zeros
[2] = { 0 };
1223 dma_memory_write(&address_space_memory
, buf_addr
,
1224 zeros
, sizeof(zeros
));
1226 buf_addr
+= sizeof(zeros
);
1227 buf_len
-= sizeof(zeros
);
1229 /* We only do this once per Ethernet frame */
1233 dma_memory_write(&address_space_memory
, buf_addr
, buf
, buf_len
);
1236 dma_memory_write(&address_space_memory
, buf_addr
+ buf_len
,
1238 crc_ptr
+= 4 - size
;
1240 bd
.flags
&= ~ENET_BD_E
;
1242 /* Last buffer in frame. */
1243 bd
.flags
|= flags
| ENET_BD_L
;
1244 FEC_PRINTF("rx frame flags %04x\n", bd
.flags
);
1245 if (bd
.option
& ENET_BD_RX_INT
) {
1246 s
->regs
[ENET_EIR
] |= ENET_INT_RXF
;
1249 if (bd
.option
& ENET_BD_RX_INT
) {
1250 s
->regs
[ENET_EIR
] |= ENET_INT_RXB
;
1253 imx_enet_write_bd(&bd
, addr
);
1254 /* Advance to the next descriptor. */
1255 if ((bd
.flags
& ENET_BD_W
) != 0) {
1256 addr
= s
->regs
[ENET_RDSR
];
1261 s
->rx_descriptor
= addr
;
1262 imx_eth_enable_rx(s
, false);
1267 static ssize_t
imx_eth_receive(NetClientState
*nc
, const uint8_t *buf
,
1270 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1272 if (!s
->is_fec
&& (s
->regs
[ENET_ECR
] & ENET_ECR_EN1588
)) {
1273 return imx_enet_receive(nc
, buf
, len
);
1275 return imx_fec_receive(nc
, buf
, len
);
1279 static const MemoryRegionOps imx_eth_ops
= {
1280 .read
= imx_eth_read
,
1281 .write
= imx_eth_write
,
1282 .valid
.min_access_size
= 4,
1283 .valid
.max_access_size
= 4,
1284 .endianness
= DEVICE_NATIVE_ENDIAN
,
1287 static void imx_eth_cleanup(NetClientState
*nc
)
1289 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1294 static NetClientInfo imx_eth_net_info
= {
1295 .type
= NET_CLIENT_DRIVER_NIC
,
1296 .size
= sizeof(NICState
),
1297 .can_receive
= imx_eth_can_receive
,
1298 .receive
= imx_eth_receive
,
1299 .cleanup
= imx_eth_cleanup
,
1300 .link_status_changed
= imx_eth_set_link
,
1304 static void imx_eth_realize(DeviceState
*dev
, Error
**errp
)
1306 IMXFECState
*s
= IMX_FEC(dev
);
1307 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1309 memory_region_init_io(&s
->iomem
, OBJECT(dev
), &imx_eth_ops
, s
,
1310 TYPE_IMX_FEC
, FSL_IMX25_FEC_SIZE
);
1311 sysbus_init_mmio(sbd
, &s
->iomem
);
1312 sysbus_init_irq(sbd
, &s
->irq
[0]);
1313 sysbus_init_irq(sbd
, &s
->irq
[1]);
1315 qemu_macaddr_default_if_unset(&s
->conf
.macaddr
);
1317 s
->nic
= qemu_new_nic(&imx_eth_net_info
, &s
->conf
,
1318 object_get_typename(OBJECT(dev
)),
1319 DEVICE(dev
)->id
, s
);
1321 qemu_format_nic_info_str(qemu_get_queue(s
->nic
), s
->conf
.macaddr
.a
);
1324 static Property imx_eth_properties
[] = {
1325 DEFINE_NIC_PROPERTIES(IMXFECState
, conf
),
1326 DEFINE_PROP_UINT32("tx-ring-num", IMXFECState
, tx_ring_num
, 1),
1327 DEFINE_PROP_END_OF_LIST(),
1330 static void imx_eth_class_init(ObjectClass
*klass
, void *data
)
1332 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1334 dc
->vmsd
= &vmstate_imx_eth
;
1335 dc
->reset
= imx_eth_reset
;
1336 dc
->props
= imx_eth_properties
;
1337 dc
->realize
= imx_eth_realize
;
1338 dc
->desc
= "i.MX FEC/ENET Ethernet Controller";
1341 static void imx_fec_init(Object
*obj
)
1343 IMXFECState
*s
= IMX_FEC(obj
);
1348 static void imx_enet_init(Object
*obj
)
1350 IMXFECState
*s
= IMX_FEC(obj
);
1355 static const TypeInfo imx_fec_info
= {
1356 .name
= TYPE_IMX_FEC
,
1357 .parent
= TYPE_SYS_BUS_DEVICE
,
1358 .instance_size
= sizeof(IMXFECState
),
1359 .instance_init
= imx_fec_init
,
1360 .class_init
= imx_eth_class_init
,
1363 static const TypeInfo imx_enet_info
= {
1364 .name
= TYPE_IMX_ENET
,
1365 .parent
= TYPE_IMX_FEC
,
1366 .instance_init
= imx_enet_init
,
1369 static void imx_eth_register_types(void)
1371 type_register_static(&imx_fec_info
);
1372 type_register_static(&imx_enet_info
);
1375 type_init(imx_eth_register_types
)