2 * i.MX Fast Ethernet Controller emulation.
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
6 * Based on Coldfire Fast Ethernet Controller emulation.
8 * Copyright (c) 2007 CodeSourcery.
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
24 #include "qemu/osdep.h"
26 #include "hw/net/imx_fec.h"
27 #include "migration/vmstate.h"
28 #include "sysemu/dma.h"
30 #include "qemu/module.h"
31 #include "net/checksum.h"
38 #define DEBUG_IMX_FEC 0
41 #define FEC_PRINTF(fmt, args...) \
43 if (DEBUG_IMX_FEC) { \
44 fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
50 #define DEBUG_IMX_PHY 0
53 #define PHY_PRINTF(fmt, args...) \
55 if (DEBUG_IMX_PHY) { \
56 fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
61 #define IMX_MAX_DESC 1024
63 static const char *imx_default_reg_name(IMXFECState
*s
, uint32_t index
)
66 sprintf(tmp
, "index %d", index
);
70 static const char *imx_fec_reg_name(IMXFECState
*s
, uint32_t index
)
77 case ENET_MIIGSK_CFGR
:
82 return imx_default_reg_name(s
, index
);
86 static const char *imx_enet_reg_name(IMXFECState
*s
, uint32_t index
)
144 return imx_default_reg_name(s
, index
);
148 static const char *imx_eth_reg_name(IMXFECState
*s
, uint32_t index
)
195 return imx_fec_reg_name(s
, index
);
197 return imx_enet_reg_name(s
, index
);
203 * Versions of this device with more than one TX descriptor save the
204 * 2nd and 3rd descriptors in a subsection, to maintain migration
205 * compatibility with previous versions of the device that only
206 * supported a single descriptor.
208 static bool imx_eth_is_multi_tx_ring(void *opaque
)
210 IMXFECState
*s
= IMX_FEC(opaque
);
212 return s
->tx_ring_num
> 1;
215 static const VMStateDescription vmstate_imx_eth_txdescs
= {
216 .name
= "imx.fec/txdescs",
218 .minimum_version_id
= 1,
219 .needed
= imx_eth_is_multi_tx_ring
,
220 .fields
= (VMStateField
[]) {
221 VMSTATE_UINT32(tx_descriptor
[1], IMXFECState
),
222 VMSTATE_UINT32(tx_descriptor
[2], IMXFECState
),
223 VMSTATE_END_OF_LIST()
227 static const VMStateDescription vmstate_imx_eth
= {
228 .name
= TYPE_IMX_FEC
,
230 .minimum_version_id
= 2,
231 .fields
= (VMStateField
[]) {
232 VMSTATE_UINT32_ARRAY(regs
, IMXFECState
, ENET_MAX
),
233 VMSTATE_UINT32(rx_descriptor
, IMXFECState
),
234 VMSTATE_UINT32(tx_descriptor
[0], IMXFECState
),
235 VMSTATE_UINT32(phy_status
, IMXFECState
),
236 VMSTATE_UINT32(phy_control
, IMXFECState
),
237 VMSTATE_UINT32(phy_advertise
, IMXFECState
),
238 VMSTATE_UINT32(phy_int
, IMXFECState
),
239 VMSTATE_UINT32(phy_int_mask
, IMXFECState
),
240 VMSTATE_END_OF_LIST()
242 .subsections
= (const VMStateDescription
* []) {
243 &vmstate_imx_eth_txdescs
,
/*
 * PHY interrupt event bits: latched into s->phy_int (read back as PHY
 * register 29, "Interrupt source") and masked by s->phy_int_mask
 * (PHY register 30, "Interrupt mask").
 */
248 #define PHY_INT_ENERGYON (1 << 7)
249 #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
250 #define PHY_INT_FAULT (1 << 5)
251 #define PHY_INT_DOWN (1 << 4)
252 #define PHY_INT_AUTONEG_LP (1 << 3)
253 #define PHY_INT_PARFAULT (1 << 2)
254 #define PHY_INT_AUTONEG_PAGE (1 << 1)
256 static void imx_eth_update(IMXFECState
*s
);
259 * The MII phy could raise a GPIO to the processor which in turn
260 * could be handled as an interrupt by the OS.
261 * For now we don't handle any GPIO/interrupt line, so the OS will
262 * have to poll for the PHY status.
264 static void phy_update_irq(IMXFECState
*s
)
/* Mirror the backend's link state into the emulated PHY registers. */
269 static void phy_update_link(IMXFECState
*s
)
271 /* Autonegotiation status mirrors link status. */
/* Link down: clear status bits 0x0024 and latch the DOWN event. */
272 if (qemu_get_queue(s
->nic
)->link_down
) {
273 PHY_PRINTF("link is down\n");
274 s
->phy_status
&= ~0x0024;
275 s
->phy_int
|= PHY_INT_DOWN
;
/*
 * Link up: set the same status bits (presumably BMSR link-status and
 * autoneg-complete — TODO confirm) and latch the corresponding events.
 */
277 PHY_PRINTF("link is up\n");
278 s
->phy_status
|= 0x0024;
279 s
->phy_int
|= PHY_INT_ENERGYON
;
280 s
->phy_int
|= PHY_INT_AUTONEG_COMPLETE
;
285 static void imx_eth_set_link(NetClientState
*nc
)
287 phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc
)));
/* Reset the emulated PHY registers to their power-on defaults. */
290 static void phy_reset(IMXFECState
*s
)
/* NOTE(review): values look like BMSR/BMCR/ANAR reset defaults — confirm
 * against the modelled PHY's datasheet. */
292 s
->phy_status
= 0x7809;
293 s
->phy_control
= 0x3000;
294 s
->phy_advertise
= 0x01e1;
/*
 * Handle a guest MDIO read of PHY register @reg; returns the register
 * value (0 for unimplemented/bad registers, per the fall-through cases).
 */
300 static uint32_t do_phy_read(IMXFECState
*s
, int reg
)
305 /* we only advertise one phy */
310 case 0: /* Basic Control */
311 val
= s
->phy_control
;
313 case 1: /* Basic Status */
322 case 4: /* Auto-neg advertisement */
323 val
= s
->phy_advertise
;
325 case 5: /* Auto-neg Link Partner Ability */
328 case 6: /* Auto-neg Expansion */
331 case 29: /* Interrupt source. */
336 case 30: /* Interrupt mask */
337 val
= s
->phy_int_mask
;
/* Known registers the model does not implement: log once per access. */
343 qemu_log_mask(LOG_UNIMP
, "[%s.phy]%s: reg %d not implemented\n",
344 TYPE_IMX_FEC
, __func__
, reg
);
/* Anything else is an out-of-range guest access. */
348 qemu_log_mask(LOG_GUEST_ERROR
, "[%s.phy]%s: Bad address at offset %d\n",
349 TYPE_IMX_FEC
, __func__
, reg
);
354 PHY_PRINTF("read 0x%04x @ %d\n", val
, reg
);
/* Handle a guest MDIO write to PHY register @reg. */
359 static void do_phy_write(IMXFECState
*s
, int reg
, uint32_t val
)
361 PHY_PRINTF("write 0x%04x @ %d\n", val
, reg
);
364 /* we only advertise one phy */
369 case 0: /* Basic Control */
373 s
->phy_control
= val
& 0x7980;
374 /* Complete autonegotiation immediately. */
376 s
->phy_status
|= 0x0020;
380 case 4: /* Auto-neg advertisement */
381 s
->phy_advertise
= (val
& 0x2d7f) | 0x80;
383 case 30: /* Interrupt mask */
384 s
->phy_int_mask
= val
& 0xff;
/* Fix: prefix was "[%s.phy)" — now matches the "[%s.phy]" form used by
 * do_phy_read() and the other log messages in this file. */
391 qemu_log_mask(LOG_UNIMP
, "[%s.phy]%s: reg %d not implemented\n",
392 TYPE_IMX_FEC
, __func__
, reg
);
395 qemu_log_mask(LOG_GUEST_ERROR
, "[%s.phy]%s: Bad address at offset %d\n",
396 TYPE_IMX_FEC
, __func__
, reg
);
401 static void imx_fec_read_bd(IMXFECBufDesc
*bd
, dma_addr_t addr
)
403 dma_memory_read(&address_space_memory
, addr
, bd
, sizeof(*bd
));
406 static void imx_fec_write_bd(IMXFECBufDesc
*bd
, dma_addr_t addr
)
408 dma_memory_write(&address_space_memory
, addr
, bd
, sizeof(*bd
));
411 static void imx_enet_read_bd(IMXENETBufDesc
*bd
, dma_addr_t addr
)
413 dma_memory_read(&address_space_memory
, addr
, bd
, sizeof(*bd
));
416 static void imx_enet_write_bd(IMXENETBufDesc
*bd
, dma_addr_t addr
)
418 dma_memory_write(&address_space_memory
, addr
, bd
, sizeof(*bd
));
421 static void imx_eth_update(IMXFECState
*s
)
424 * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
425 * interrupts swapped. This worked with older versions of Linux (4.14
426 * and older) since Linux associated both interrupt lines with Ethernet
427 * MAC interrupts. Specifically,
428 * - Linux 4.15 and later have separate interrupt handlers for the MAC and
429 * timer interrupts. Those versions of Linux fail with versions of QEMU
430 * with swapped interrupt assignments.
431 * - In linux 4.14, both interrupt lines were registered with the Ethernet
432 * MAC interrupt handler. As a result, all versions of qemu happen to
433 * work, though that is accidental.
434 * - In Linux 4.9 and older, the timer interrupt was registered directly
435 * with the Ethernet MAC interrupt handler. The MAC interrupt was
436 * redirected to a GPIO interrupt to work around erratum ERR006687.
437 * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
438 * interrupt never fired since IOMUX is currently not supported in qemu.
439 * Linux instead received MAC interrupts on the timer interrupt.
440 * As a result, qemu versions with the swapped interrupt assignment work,
441 * albeit accidentally, but qemu versions with the correct interrupt
444 * To ensure that all versions of Linux work, generate ENET_INT_MAC
445 * interrrupts on both interrupt lines. This should be changed if and when
446 * qemu supports IOMUX.
448 if (s
->regs
[ENET_EIR
] & s
->regs
[ENET_EIMR
] &
449 (ENET_INT_MAC
| ENET_INT_TS_TIMER
)) {
450 qemu_set_irq(s
->irq
[1], 1);
452 qemu_set_irq(s
->irq
[1], 0);
455 if (s
->regs
[ENET_EIR
] & s
->regs
[ENET_EIMR
] & ENET_INT_MAC
) {
456 qemu_set_irq(s
->irq
[0], 1);
458 qemu_set_irq(s
->irq
[0], 0);
462 static void imx_fec_do_tx(IMXFECState
*s
)
464 int frame_size
= 0, descnt
= 0;
465 uint8_t *ptr
= s
->frame
;
466 uint32_t addr
= s
->tx_descriptor
[0];
468 while (descnt
++ < IMX_MAX_DESC
) {
472 imx_fec_read_bd(&bd
, addr
);
473 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
474 addr
, bd
.flags
, bd
.length
, bd
.data
);
475 if ((bd
.flags
& ENET_BD_R
) == 0) {
476 /* Run out of descriptors to transmit. */
477 FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
481 if (frame_size
+ len
> ENET_MAX_FRAME_SIZE
) {
482 len
= ENET_MAX_FRAME_SIZE
- frame_size
;
483 s
->regs
[ENET_EIR
] |= ENET_INT_BABT
;
485 dma_memory_read(&address_space_memory
, bd
.data
, ptr
, len
);
488 if (bd
.flags
& ENET_BD_L
) {
489 /* Last buffer in frame. */
490 qemu_send_packet(qemu_get_queue(s
->nic
), s
->frame
, frame_size
);
493 s
->regs
[ENET_EIR
] |= ENET_INT_TXF
;
495 s
->regs
[ENET_EIR
] |= ENET_INT_TXB
;
496 bd
.flags
&= ~ENET_BD_R
;
497 /* Write back the modified descriptor. */
498 imx_fec_write_bd(&bd
, addr
);
499 /* Advance to the next descriptor. */
500 if ((bd
.flags
& ENET_BD_W
) != 0) {
501 addr
= s
->regs
[ENET_TDSR
];
507 s
->tx_descriptor
[0] = addr
;
512 static void imx_enet_do_tx(IMXFECState
*s
, uint32_t index
)
514 int frame_size
= 0, descnt
= 0;
516 uint8_t *ptr
= s
->frame
;
517 uint32_t addr
, int_txb
, int_txf
, tdsr
;
523 int_txb
= ENET_INT_TXB
;
524 int_txf
= ENET_INT_TXF
;
529 int_txb
= ENET_INT_TXB1
;
530 int_txf
= ENET_INT_TXF1
;
535 int_txb
= ENET_INT_TXB2
;
536 int_txf
= ENET_INT_TXF2
;
540 qemu_log_mask(LOG_GUEST_ERROR
,
541 "%s: bogus value for index %x\n",
547 addr
= s
->tx_descriptor
[ring
];
549 while (descnt
++ < IMX_MAX_DESC
) {
553 imx_enet_read_bd(&bd
, addr
);
554 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
555 "status %04x\n", addr
, bd
.flags
, bd
.length
, bd
.data
,
556 bd
.option
, bd
.status
);
557 if ((bd
.flags
& ENET_BD_R
) == 0) {
558 /* Run out of descriptors to transmit. */
562 if (frame_size
+ len
> ENET_MAX_FRAME_SIZE
) {
563 len
= ENET_MAX_FRAME_SIZE
- frame_size
;
564 s
->regs
[ENET_EIR
] |= ENET_INT_BABT
;
566 dma_memory_read(&address_space_memory
, bd
.data
, ptr
, len
);
569 if (bd
.flags
& ENET_BD_L
) {
570 if (bd
.option
& ENET_BD_PINS
) {
571 struct ip_header
*ip_hd
= PKT_GET_IP_HDR(s
->frame
);
572 if (IP_HEADER_VERSION(ip_hd
) == 4) {
573 net_checksum_calculate(s
->frame
, frame_size
);
576 if (bd
.option
& ENET_BD_IINS
) {
577 struct ip_header
*ip_hd
= PKT_GET_IP_HDR(s
->frame
);
578 /* We compute checksum only for IPv4 frames */
579 if (IP_HEADER_VERSION(ip_hd
) == 4) {
582 csum
= net_raw_checksum((uint8_t *)ip_hd
, sizeof(*ip_hd
));
583 ip_hd
->ip_sum
= cpu_to_be16(csum
);
586 /* Last buffer in frame. */
588 qemu_send_packet(qemu_get_queue(s
->nic
), s
->frame
, frame_size
);
592 if (bd
.option
& ENET_BD_TX_INT
) {
593 s
->regs
[ENET_EIR
] |= int_txf
;
596 if (bd
.option
& ENET_BD_TX_INT
) {
597 s
->regs
[ENET_EIR
] |= int_txb
;
599 bd
.flags
&= ~ENET_BD_R
;
600 /* Write back the modified descriptor. */
601 imx_enet_write_bd(&bd
, addr
);
602 /* Advance to the next descriptor. */
603 if ((bd
.flags
& ENET_BD_W
) != 0) {
604 addr
= s
->regs
[tdsr
];
610 s
->tx_descriptor
[ring
] = addr
;
/*
 * TX kick dispatcher: ENET devices with 1588 support enabled take the
 * enhanced-descriptor path; the FEC fallback path is not visible in this
 * view (presumably imx_fec_do_tx(s) — TODO confirm against full source).
 */
615 static void imx_eth_do_tx(IMXFECState
*s
, uint32_t index
)
617 if (!s
->is_fec
&& (s
->regs
[ENET_ECR
] & ENET_ECR_EN1588
)) {
618 imx_enet_do_tx(s
, index
);
/* Recompute RDAR from the current RX descriptor's empty flag. */
624 static void imx_eth_enable_rx(IMXFECState
*s
, bool flush
)
628 imx_fec_read_bd(&bd
, s
->rx_descriptor
);
/* RDAR stays active only while the descriptor is empty (ENET_BD_E). */
630 s
->regs
[ENET_RDAR
] = (bd
.flags
& ENET_BD_E
) ? ENET_RDAR_RDAR
: 0;
632 if (!s
->regs
[ENET_RDAR
]) {
633 FEC_PRINTF("RX buffer full\n");
/* NOTE(review): the flush below is presumably gated on @flush in the
 * non-full branch — the connecting line is not visible here; confirm. */
635 qemu_flush_queued_packets(qemu_get_queue(s
->nic
));
639 static void imx_eth_reset(DeviceState
*d
)
641 IMXFECState
*s
= IMX_FEC(d
);
643 /* Reset the Device */
644 memset(s
->regs
, 0, sizeof(s
->regs
));
645 s
->regs
[ENET_ECR
] = 0xf0000000;
646 s
->regs
[ENET_MIBC
] = 0xc0000000;
647 s
->regs
[ENET_RCR
] = 0x05ee0001;
648 s
->regs
[ENET_OPD
] = 0x00010000;
650 s
->regs
[ENET_PALR
] = (s
->conf
.macaddr
.a
[0] << 24)
651 | (s
->conf
.macaddr
.a
[1] << 16)
652 | (s
->conf
.macaddr
.a
[2] << 8)
653 | s
->conf
.macaddr
.a
[3];
654 s
->regs
[ENET_PAUR
] = (s
->conf
.macaddr
.a
[4] << 24)
655 | (s
->conf
.macaddr
.a
[5] << 16)
659 s
->regs
[ENET_FRBR
] = 0x00000600;
660 s
->regs
[ENET_FRSR
] = 0x00000500;
661 s
->regs
[ENET_MIIGSK_ENR
] = 0x00000006;
663 s
->regs
[ENET_RAEM
] = 0x00000004;
664 s
->regs
[ENET_RAFL
] = 0x00000004;
665 s
->regs
[ENET_TAEM
] = 0x00000004;
666 s
->regs
[ENET_TAFL
] = 0x00000008;
667 s
->regs
[ENET_TIPG
] = 0x0000000c;
668 s
->regs
[ENET_FTRL
] = 0x000007ff;
669 s
->regs
[ENET_ATPER
] = 0x3b9aca00;
672 s
->rx_descriptor
= 0;
673 memset(s
->tx_descriptor
, 0, sizeof(s
->tx_descriptor
));
675 /* We also reset the PHY */
679 static uint32_t imx_default_read(IMXFECState
*s
, uint32_t index
)
681 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Bad register at offset 0x%"
682 PRIx32
"\n", TYPE_IMX_FEC
, __func__
, index
* 4);
686 static uint32_t imx_fec_read(IMXFECState
*s
, uint32_t index
)
691 case ENET_MIIGSK_CFGR
:
692 case ENET_MIIGSK_ENR
:
693 return s
->regs
[index
];
695 return imx_default_read(s
, index
);
699 static uint32_t imx_enet_read(IMXFECState
*s
, uint32_t index
)
729 return s
->regs
[index
];
731 return imx_default_read(s
, index
);
735 static uint64_t imx_eth_read(void *opaque
, hwaddr offset
, unsigned size
)
738 IMXFECState
*s
= IMX_FEC(opaque
);
739 uint32_t index
= offset
>> 2;
763 value
= s
->regs
[index
];
767 value
= imx_fec_read(s
, index
);
769 value
= imx_enet_read(s
, index
);
774 FEC_PRINTF("reg[%s] => 0x%" PRIx32
"\n", imx_eth_reg_name(s
, index
),
780 static void imx_default_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
782 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Bad address at offset 0x%"
783 PRIx32
"\n", TYPE_IMX_FEC
, __func__
, index
* 4);
787 static void imx_fec_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
791 /* FRBR is read only */
792 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Register FRBR is read only\n",
793 TYPE_IMX_FEC
, __func__
);
796 s
->regs
[index
] = (value
& 0x000003fc) | 0x00000400;
798 case ENET_MIIGSK_CFGR
:
799 s
->regs
[index
] = value
& 0x00000053;
801 case ENET_MIIGSK_ENR
:
802 s
->regs
[index
] = (value
& 0x00000002) ? 0x00000006 : 0;
805 imx_default_write(s
, index
, value
);
810 static void imx_enet_write(IMXFECState
*s
, uint32_t index
, uint32_t value
)
820 s
->regs
[index
] = value
& 0x000001ff;
823 s
->regs
[index
] = value
& 0x0000001f;
826 s
->regs
[index
] = value
& 0x00003fff;
829 s
->regs
[index
] = value
& 0x00000019;
832 s
->regs
[index
] = value
& 0x000000C7;
835 s
->regs
[index
] = value
& 0x00002a9d;
840 s
->regs
[index
] = value
;
843 /* ATSTMP is read only */
844 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Register ATSTMP is read only\n",
845 TYPE_IMX_FEC
, __func__
);
848 s
->regs
[index
] = value
& 0x7fffffff;
851 s
->regs
[index
] = value
& 0x00007f7f;
854 /* implement clear timer flag */
855 value
= value
& 0x0000000f;
861 value
= value
& 0x000000fd;
867 s
->regs
[index
] = value
;
870 imx_default_write(s
, index
, value
);
875 static void imx_eth_write(void *opaque
, hwaddr offset
, uint64_t value
,
878 IMXFECState
*s
= IMX_FEC(opaque
);
879 const bool single_tx_ring
= !imx_eth_is_multi_tx_ring(s
);
880 uint32_t index
= offset
>> 2;
882 FEC_PRINTF("reg[%s] <= 0x%" PRIx32
"\n", imx_eth_reg_name(s
, index
),
887 s
->regs
[index
] &= ~value
;
890 s
->regs
[index
] = value
;
893 if (s
->regs
[ENET_ECR
] & ENET_ECR_ETHEREN
) {
894 if (!s
->regs
[index
]) {
895 imx_eth_enable_rx(s
, true);
901 case ENET_TDAR1
: /* FALLTHROUGH */
902 case ENET_TDAR2
: /* FALLTHROUGH */
903 if (unlikely(single_tx_ring
)) {
904 qemu_log_mask(LOG_GUEST_ERROR
,
905 "[%s]%s: trying to access TDAR2 or TDAR1\n",
906 TYPE_IMX_FEC
, __func__
);
909 case ENET_TDAR
: /* FALLTHROUGH */
910 if (s
->regs
[ENET_ECR
] & ENET_ECR_ETHEREN
) {
911 s
->regs
[index
] = ENET_TDAR_TDAR
;
912 imx_eth_do_tx(s
, index
);
917 if (value
& ENET_ECR_RESET
) {
918 return imx_eth_reset(DEVICE(s
));
920 s
->regs
[index
] = value
;
921 if ((s
->regs
[index
] & ENET_ECR_ETHEREN
) == 0) {
922 s
->regs
[ENET_RDAR
] = 0;
923 s
->rx_descriptor
= s
->regs
[ENET_RDSR
];
924 s
->regs
[ENET_TDAR
] = 0;
925 s
->regs
[ENET_TDAR1
] = 0;
926 s
->regs
[ENET_TDAR2
] = 0;
927 s
->tx_descriptor
[0] = s
->regs
[ENET_TDSR
];
928 s
->tx_descriptor
[1] = s
->regs
[ENET_TDSR1
];
929 s
->tx_descriptor
[2] = s
->regs
[ENET_TDSR2
];
933 s
->regs
[index
] = value
;
934 if (extract32(value
, 29, 1)) {
935 /* This is a read operation */
936 s
->regs
[ENET_MMFR
] = deposit32(s
->regs
[ENET_MMFR
], 0, 16,
941 /* This a write operation */
942 do_phy_write(s
, extract32(value
, 18, 10), extract32(value
, 0, 16));
944 /* raise the interrupt as the PHY operation is done */
945 s
->regs
[ENET_EIR
] |= ENET_INT_MII
;
948 s
->regs
[index
] = value
& 0xfe;
951 /* TODO: Implement MIB. */
952 s
->regs
[index
] = (value
& 0x80000000) ? 0xc0000000 : 0;
955 s
->regs
[index
] = value
& 0x07ff003f;
956 /* TODO: Implement LOOP mode. */
959 /* We transmit immediately, so raise GRA immediately. */
960 s
->regs
[index
] = value
;
962 s
->regs
[ENET_EIR
] |= ENET_INT_GRA
;
966 s
->regs
[index
] = value
;
967 s
->conf
.macaddr
.a
[0] = value
>> 24;
968 s
->conf
.macaddr
.a
[1] = value
>> 16;
969 s
->conf
.macaddr
.a
[2] = value
>> 8;
970 s
->conf
.macaddr
.a
[3] = value
;
973 s
->regs
[index
] = (value
| 0x0000ffff) & 0xffff8808;
974 s
->conf
.macaddr
.a
[4] = value
>> 24;
975 s
->conf
.macaddr
.a
[5] = value
>> 16;
978 s
->regs
[index
] = (value
& 0x0000ffff) | 0x00010000;
984 /* TODO: implement MAC hash filtering. */
988 s
->regs
[index
] = value
& 0x3;
990 s
->regs
[index
] = value
& 0x13f;
995 s
->regs
[index
] = value
& ~3;
997 s
->regs
[index
] = value
& ~7;
999 s
->rx_descriptor
= s
->regs
[index
];
1003 s
->regs
[index
] = value
& ~3;
1005 s
->regs
[index
] = value
& ~7;
1007 s
->tx_descriptor
[0] = s
->regs
[index
];
1010 if (unlikely(single_tx_ring
)) {
1011 qemu_log_mask(LOG_GUEST_ERROR
,
1012 "[%s]%s: trying to access TDSR1\n",
1013 TYPE_IMX_FEC
, __func__
);
1017 s
->regs
[index
] = value
& ~7;
1018 s
->tx_descriptor
[1] = s
->regs
[index
];
1021 if (unlikely(single_tx_ring
)) {
1022 qemu_log_mask(LOG_GUEST_ERROR
,
1023 "[%s]%s: trying to access TDSR2\n",
1024 TYPE_IMX_FEC
, __func__
);
1028 s
->regs
[index
] = value
& ~7;
1029 s
->tx_descriptor
[2] = s
->regs
[index
];
1032 s
->regs
[index
] = value
& 0x00003ff0;
1036 imx_fec_write(s
, index
, value
);
1038 imx_enet_write(s
, index
, value
);
/* Backend poll: packets are accepted only while RDAR reports RX active. */
1046 static int imx_eth_can_receive(NetClientState
*nc
)
1048 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1052 return !!s
->regs
[ENET_RDAR
];
1055 static ssize_t
imx_fec_receive(NetClientState
*nc
, const uint8_t *buf
,
1058 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1065 unsigned int buf_len
;
1068 FEC_PRINTF("len %d\n", (int)size
);
1070 if (!s
->regs
[ENET_RDAR
]) {
1071 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Unexpected packet\n",
1072 TYPE_IMX_FEC
, __func__
);
1076 /* 4 bytes for the CRC. */
1078 crc
= cpu_to_be32(crc32(~0, buf
, size
));
1079 crc_ptr
= (uint8_t *) &crc
;
1081 /* Huge frames are truncated. */
1082 if (size
> ENET_MAX_FRAME_SIZE
) {
1083 size
= ENET_MAX_FRAME_SIZE
;
1084 flags
|= ENET_BD_TR
| ENET_BD_LG
;
1087 /* Frames larger than the user limit just set error flags. */
1088 if (size
> (s
->regs
[ENET_RCR
] >> 16)) {
1089 flags
|= ENET_BD_LG
;
1092 addr
= s
->rx_descriptor
;
1094 imx_fec_read_bd(&bd
, addr
);
1095 if ((bd
.flags
& ENET_BD_E
) == 0) {
1096 /* No descriptors available. Bail out. */
1098 * FIXME: This is wrong. We should probably either
1099 * save the remainder for when more RX buffers are
1100 * available, or flag an error.
1102 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Lost end of frame\n",
1103 TYPE_IMX_FEC
, __func__
);
1106 buf_len
= (size
<= s
->regs
[ENET_MRBR
]) ? size
: s
->regs
[ENET_MRBR
];
1107 bd
.length
= buf_len
;
1110 FEC_PRINTF("rx_bd 0x%x length %d\n", addr
, bd
.length
);
1112 /* The last 4 bytes are the CRC. */
1114 buf_len
+= size
- 4;
1117 dma_memory_write(&address_space_memory
, buf_addr
, buf
, buf_len
);
1120 dma_memory_write(&address_space_memory
, buf_addr
+ buf_len
,
1122 crc_ptr
+= 4 - size
;
1124 bd
.flags
&= ~ENET_BD_E
;
1126 /* Last buffer in frame. */
1127 bd
.flags
|= flags
| ENET_BD_L
;
1128 FEC_PRINTF("rx frame flags %04x\n", bd
.flags
);
1129 s
->regs
[ENET_EIR
] |= ENET_INT_RXF
;
1131 s
->regs
[ENET_EIR
] |= ENET_INT_RXB
;
1133 imx_fec_write_bd(&bd
, addr
);
1134 /* Advance to the next descriptor. */
1135 if ((bd
.flags
& ENET_BD_W
) != 0) {
1136 addr
= s
->regs
[ENET_RDSR
];
1141 s
->rx_descriptor
= addr
;
1142 imx_eth_enable_rx(s
, false);
1147 static ssize_t
imx_enet_receive(NetClientState
*nc
, const uint8_t *buf
,
1150 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1157 unsigned int buf_len
;
1159 bool shift16
= s
->regs
[ENET_RACC
] & ENET_RACC_SHIFT16
;
1161 FEC_PRINTF("len %d\n", (int)size
);
1163 if (!s
->regs
[ENET_RDAR
]) {
1164 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Unexpected packet\n",
1165 TYPE_IMX_FEC
, __func__
);
1169 /* 4 bytes for the CRC. */
1171 crc
= cpu_to_be32(crc32(~0, buf
, size
));
1172 crc_ptr
= (uint8_t *) &crc
;
1178 /* Huge frames are truncated. */
1179 if (size
> s
->regs
[ENET_FTRL
]) {
1180 size
= s
->regs
[ENET_FTRL
];
1181 flags
|= ENET_BD_TR
| ENET_BD_LG
;
1184 /* Frames larger than the user limit just set error flags. */
1185 if (size
> (s
->regs
[ENET_RCR
] >> 16)) {
1186 flags
|= ENET_BD_LG
;
1189 addr
= s
->rx_descriptor
;
1191 imx_enet_read_bd(&bd
, addr
);
1192 if ((bd
.flags
& ENET_BD_E
) == 0) {
1193 /* No descriptors available. Bail out. */
1195 * FIXME: This is wrong. We should probably either
1196 * save the remainder for when more RX buffers are
1197 * available, or flag an error.
1199 qemu_log_mask(LOG_GUEST_ERROR
, "[%s]%s: Lost end of frame\n",
1200 TYPE_IMX_FEC
, __func__
);
1203 buf_len
= MIN(size
, s
->regs
[ENET_MRBR
]);
1204 bd
.length
= buf_len
;
1207 FEC_PRINTF("rx_bd 0x%x length %d\n", addr
, bd
.length
);
1209 /* The last 4 bytes are the CRC. */
1211 buf_len
+= size
- 4;
1217 * If SHIFT16 bit of ENETx_RACC register is set we need to
1218 * align the payload to 4-byte boundary.
1220 const uint8_t zeros
[2] = { 0 };
1222 dma_memory_write(&address_space_memory
, buf_addr
,
1223 zeros
, sizeof(zeros
));
1225 buf_addr
+= sizeof(zeros
);
1226 buf_len
-= sizeof(zeros
);
1228 /* We only do this once per Ethernet frame */
1232 dma_memory_write(&address_space_memory
, buf_addr
, buf
, buf_len
);
1235 dma_memory_write(&address_space_memory
, buf_addr
+ buf_len
,
1237 crc_ptr
+= 4 - size
;
1239 bd
.flags
&= ~ENET_BD_E
;
1241 /* Last buffer in frame. */
1242 bd
.flags
|= flags
| ENET_BD_L
;
1243 FEC_PRINTF("rx frame flags %04x\n", bd
.flags
);
1244 if (bd
.option
& ENET_BD_RX_INT
) {
1245 s
->regs
[ENET_EIR
] |= ENET_INT_RXF
;
1248 if (bd
.option
& ENET_BD_RX_INT
) {
1249 s
->regs
[ENET_EIR
] |= ENET_INT_RXB
;
1252 imx_enet_write_bd(&bd
, addr
);
1253 /* Advance to the next descriptor. */
1254 if ((bd
.flags
& ENET_BD_W
) != 0) {
1255 addr
= s
->regs
[ENET_RDSR
];
1260 s
->rx_descriptor
= addr
;
1261 imx_eth_enable_rx(s
, false);
1266 static ssize_t
imx_eth_receive(NetClientState
*nc
, const uint8_t *buf
,
1269 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1271 if (!s
->is_fec
&& (s
->regs
[ENET_ECR
] & ENET_ECR_EN1588
)) {
1272 return imx_enet_receive(nc
, buf
, len
);
1274 return imx_fec_receive(nc
, buf
, len
);
1278 static const MemoryRegionOps imx_eth_ops
= {
1279 .read
= imx_eth_read
,
1280 .write
= imx_eth_write
,
1281 .valid
.min_access_size
= 4,
1282 .valid
.max_access_size
= 4,
1283 .endianness
= DEVICE_NATIVE_ENDIAN
,
1286 static void imx_eth_cleanup(NetClientState
*nc
)
1288 IMXFECState
*s
= IMX_FEC(qemu_get_nic_opaque(nc
));
1293 static NetClientInfo imx_eth_net_info
= {
1294 .type
= NET_CLIENT_DRIVER_NIC
,
1295 .size
= sizeof(NICState
),
1296 .can_receive
= imx_eth_can_receive
,
1297 .receive
= imx_eth_receive
,
1298 .cleanup
= imx_eth_cleanup
,
1299 .link_status_changed
= imx_eth_set_link
,
1303 static void imx_eth_realize(DeviceState
*dev
, Error
**errp
)
1305 IMXFECState
*s
= IMX_FEC(dev
);
1306 SysBusDevice
*sbd
= SYS_BUS_DEVICE(dev
);
1308 memory_region_init_io(&s
->iomem
, OBJECT(dev
), &imx_eth_ops
, s
,
1309 TYPE_IMX_FEC
, FSL_IMX25_FEC_SIZE
);
1310 sysbus_init_mmio(sbd
, &s
->iomem
);
1311 sysbus_init_irq(sbd
, &s
->irq
[0]);
1312 sysbus_init_irq(sbd
, &s
->irq
[1]);
1314 qemu_macaddr_default_if_unset(&s
->conf
.macaddr
);
1316 s
->nic
= qemu_new_nic(&imx_eth_net_info
, &s
->conf
,
1317 object_get_typename(OBJECT(dev
)),
1318 DEVICE(dev
)->id
, s
);
1320 qemu_format_nic_info_str(qemu_get_queue(s
->nic
), s
->conf
.macaddr
.a
);
1323 static Property imx_eth_properties
[] = {
1324 DEFINE_NIC_PROPERTIES(IMXFECState
, conf
),
1325 DEFINE_PROP_UINT32("tx-ring-num", IMXFECState
, tx_ring_num
, 1),
1326 DEFINE_PROP_END_OF_LIST(),
1329 static void imx_eth_class_init(ObjectClass
*klass
, void *data
)
1331 DeviceClass
*dc
= DEVICE_CLASS(klass
);
1333 dc
->vmsd
= &vmstate_imx_eth
;
1334 dc
->reset
= imx_eth_reset
;
1335 dc
->props
= imx_eth_properties
;
1336 dc
->realize
= imx_eth_realize
;
1337 dc
->desc
= "i.MX FEC/ENET Ethernet Controller";
1340 static void imx_fec_init(Object
*obj
)
1342 IMXFECState
*s
= IMX_FEC(obj
);
1347 static void imx_enet_init(Object
*obj
)
1349 IMXFECState
*s
= IMX_FEC(obj
);
1354 static const TypeInfo imx_fec_info
= {
1355 .name
= TYPE_IMX_FEC
,
1356 .parent
= TYPE_SYS_BUS_DEVICE
,
1357 .instance_size
= sizeof(IMXFECState
),
1358 .instance_init
= imx_fec_init
,
1359 .class_init
= imx_eth_class_init
,
1362 static const TypeInfo imx_enet_info
= {
1363 .name
= TYPE_IMX_ENET
,
1364 .parent
= TYPE_IMX_FEC
,
1365 .instance_init
= imx_enet_init
,
1368 static void imx_eth_register_types(void)
1370 type_register_static(&imx_fec_info
);
1371 type_register_static(&imx_enet_info
);
1374 type_init(imx_eth_register_types
)