1 /*
2 * i.MX Fast Ethernet Controller emulation.
3 *
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
5 *
6 * Based on Coldfire Fast Ethernet Controller emulation.
7 *
8 * Copyright (c) 2007 CodeSourcery.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #include "qemu/osdep.h"
25 #include "hw/irq.h"
26 #include "hw/net/imx_fec.h"
27 #include "hw/qdev-properties.h"
28 #include "migration/vmstate.h"
29 #include "sysemu/dma.h"
30 #include "qemu/log.h"
31 #include "qemu/module.h"
32 #include "net/checksum.h"
33 #include "net/eth.h"
34
35 /* For crc32 */
36 #include <zlib.h>
37
38 #ifndef DEBUG_IMX_FEC
39 #define DEBUG_IMX_FEC 0
40 #endif
41
42 #define FEC_PRINTF(fmt, args...) \
43 do { \
44 if (DEBUG_IMX_FEC) { \
45 fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
46 __func__, ##args); \
47 } \
48 } while (0)
49
50 #ifndef DEBUG_IMX_PHY
51 #define DEBUG_IMX_PHY 0
52 #endif
53
54 #define PHY_PRINTF(fmt, args...) \
55 do { \
56 if (DEBUG_IMX_PHY) { \
57 fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
58 __func__, ##args); \
59 } \
60 } while (0)
61
62 #define IMX_MAX_DESC 1024
63
64 static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
65 {
66 static char tmp[20];
67 sprintf(tmp, "index %d", index);
68 return tmp;
69 }
70
71 static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
72 {
73 switch (index) {
74 case ENET_FRBR:
75 return "FRBR";
76 case ENET_FRSR:
77 return "FRSR";
78 case ENET_MIIGSK_CFGR:
79 return "MIIGSK_CFGR";
80 case ENET_MIIGSK_ENR:
81 return "MIIGSK_ENR";
82 default:
83 return imx_default_reg_name(s, index);
84 }
85 }
86
87 static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
88 {
89 switch (index) {
90 case ENET_RSFL:
91 return "RSFL";
92 case ENET_RSEM:
93 return "RSEM";
94 case ENET_RAEM:
95 return "RAEM";
96 case ENET_RAFL:
97 return "RAFL";
98 case ENET_TSEM:
99 return "TSEM";
100 case ENET_TAEM:
101 return "TAEM";
102 case ENET_TAFL:
103 return "TAFL";
104 case ENET_TIPG:
105 return "TIPG";
106 case ENET_FTRL:
107 return "FTRL";
108 case ENET_TACC:
109 return "TACC";
110 case ENET_RACC:
111 return "RACC";
112 case ENET_ATCR:
113 return "ATCR";
114 case ENET_ATVR:
115 return "ATVR";
116 case ENET_ATOFF:
117 return "ATOFF";
118 case ENET_ATPER:
119 return "ATPER";
120 case ENET_ATCOR:
121 return "ATCOR";
122 case ENET_ATINC:
123 return "ATINC";
124 case ENET_ATSTMP:
125 return "ATSTMP";
126 case ENET_TGSR:
127 return "TGSR";
128 case ENET_TCSR0:
129 return "TCSR0";
130 case ENET_TCCR0:
131 return "TCCR0";
132 case ENET_TCSR1:
133 return "TCSR1";
134 case ENET_TCCR1:
135 return "TCCR1";
136 case ENET_TCSR2:
137 return "TCSR2";
138 case ENET_TCCR2:
139 return "TCCR2";
140 case ENET_TCSR3:
141 return "TCSR3";
142 case ENET_TCCR3:
143 return "TCCR3";
144 default:
145 return imx_default_reg_name(s, index);
146 }
147 }
148
149 static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
150 {
151 switch (index) {
152 case ENET_EIR:
153 return "EIR";
154 case ENET_EIMR:
155 return "EIMR";
156 case ENET_RDAR:
157 return "RDAR";
158 case ENET_TDAR:
159 return "TDAR";
160 case ENET_ECR:
161 return "ECR";
162 case ENET_MMFR:
163 return "MMFR";
164 case ENET_MSCR:
165 return "MSCR";
166 case ENET_MIBC:
167 return "MIBC";
168 case ENET_RCR:
169 return "RCR";
170 case ENET_TCR:
171 return "TCR";
172 case ENET_PALR:
173 return "PALR";
174 case ENET_PAUR:
175 return "PAUR";
176 case ENET_OPD:
177 return "OPD";
178 case ENET_IAUR:
179 return "IAUR";
180 case ENET_IALR:
181 return "IALR";
182 case ENET_GAUR:
183 return "GAUR";
184 case ENET_GALR:
185 return "GALR";
186 case ENET_TFWR:
187 return "TFWR";
188 case ENET_RDSR:
189 return "RDSR";
190 case ENET_TDSR:
191 return "TDSR";
192 case ENET_MRBR:
193 return "MRBR";
194 default:
195 if (s->is_fec) {
196 return imx_fec_reg_name(s, index);
197 } else {
198 return imx_enet_reg_name(s, index);
199 }
200 }
201 }
202
203 /*
204 * Versions of this device with more than one TX descriptor save the
205 * 2nd and 3rd descriptors in a subsection, to maintain migration
206 * compatibility with previous versions of the device that only
207 * supported a single descriptor.
208 */
209 static bool imx_eth_is_multi_tx_ring(void *opaque)
210 {
211 IMXFECState *s = IMX_FEC(opaque);
212
213 return s->tx_ring_num > 1;
214 }
215
216 static const VMStateDescription vmstate_imx_eth_txdescs = {
217 .name = "imx.fec/txdescs",
218 .version_id = 1,
219 .minimum_version_id = 1,
220 .needed = imx_eth_is_multi_tx_ring,
221 .fields = (VMStateField[]) {
222 VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
223 VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
224 VMSTATE_END_OF_LIST()
225 }
226 };
227
228 static const VMStateDescription vmstate_imx_eth = {
229 .name = TYPE_IMX_FEC,
230 .version_id = 2,
231 .minimum_version_id = 2,
232 .fields = (VMStateField[]) {
233 VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
234 VMSTATE_UINT32(rx_descriptor, IMXFECState),
235 VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
236 VMSTATE_UINT32(phy_status, IMXFECState),
237 VMSTATE_UINT32(phy_control, IMXFECState),
238 VMSTATE_UINT32(phy_advertise, IMXFECState),
239 VMSTATE_UINT32(phy_int, IMXFECState),
240 VMSTATE_UINT32(phy_int_mask, IMXFECState),
241 VMSTATE_END_OF_LIST()
242 },
243 .subsections = (const VMStateDescription * []) {
244 &vmstate_imx_eth_txdescs,
245 NULL
246 },
247 };
248
249 #define PHY_INT_ENERGYON (1 << 7)
250 #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
251 #define PHY_INT_FAULT (1 << 5)
252 #define PHY_INT_DOWN (1 << 4)
253 #define PHY_INT_AUTONEG_LP (1 << 3)
254 #define PHY_INT_PARFAULT (1 << 2)
255 #define PHY_INT_AUTONEG_PAGE (1 << 1)
256
257 static void imx_eth_update(IMXFECState *s);
258
259 /*
260  * The MII PHY could raise a GPIO line to the processor, which in turn
261  * could be handled as an interrupt by the OS.
262 * For now we don't handle any GPIO/interrupt line, so the OS will
263 * have to poll for the PHY status.
264 */
265 static void phy_update_irq(IMXFECState *s)
266 {
267 imx_eth_update(s);
268 }
269
270 static void phy_update_link(IMXFECState *s)
271 {
272 /* Autonegotiation status mirrors link status. */
273 if (qemu_get_queue(s->nic)->link_down) {
274 PHY_PRINTF("link is down\n");
275 s->phy_status &= ~0x0024;
276 s->phy_int |= PHY_INT_DOWN;
277 } else {
278 PHY_PRINTF("link is up\n");
279 s->phy_status |= 0x0024;
280 s->phy_int |= PHY_INT_ENERGYON;
281 s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
282 }
283 phy_update_irq(s);
284 }
285
286 static void imx_eth_set_link(NetClientState *nc)
287 {
288 phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
289 }
290
291 static void phy_reset(IMXFECState *s)
292 {
293 s->phy_status = 0x7809;
294 s->phy_control = 0x3000;
295 s->phy_advertise = 0x01e1;
296 s->phy_int_mask = 0;
297 s->phy_int = 0;
298 phy_update_link(s);
299 }
300
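/*
 * MDIO register access for the single emulated PHY.  Registers 0-6
 * follow the IEEE 802.3 clause 22 basic register set; registers 29/30
 * model a vendor-style interrupt source/mask pair, and the remaining
 * vendor registers are reported as unimplemented.
 */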
301 static uint32_t do_phy_read(IMXFECState *s, int reg)
302 {
303 uint32_t val;
304
305 if (reg > 31) {
306 /* we only advertise one phy */
307 return 0;
308 }
309
310 switch (reg) {
311 case 0: /* Basic Control */
312 val = s->phy_control;
313 break;
314 case 1: /* Basic Status */
315 val = s->phy_status;
316 break;
317 case 2: /* ID1 */
318 val = 0x0007;
319 break;
320 case 3: /* ID2 */
321 val = 0xc0d1;
322 break;
323 case 4: /* Auto-neg advertisement */
324 val = s->phy_advertise;
325 break;
326 case 5: /* Auto-neg Link Partner Ability */
327 val = 0x0f71;
328 break;
329 case 6: /* Auto-neg Expansion */
330 val = 1;
331 break;
332 case 29: /* Interrupt source. */
333 val = s->phy_int;
334 s->phy_int = 0;
335 phy_update_irq(s);
336 break;
337 case 30: /* Interrupt mask */
338 val = s->phy_int_mask;
339 break;
340 case 17:
341 case 18:
342 case 27:
343 case 31:
344 qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
345 TYPE_IMX_FEC, __func__, reg);
346 val = 0;
347 break;
348 default:
349 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
350 TYPE_IMX_FEC, __func__, reg);
351 val = 0;
352 break;
353 }
354
355 PHY_PRINTF("read 0x%04x @ %d\n", val, reg);
356
357 return val;
358 }
359
360 static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
361 {
362 PHY_PRINTF("write 0x%04x @ %d\n", val, reg);
363
364 if (reg > 31) {
365 /* we only advertise one phy */
366 return;
367 }
368
369 switch (reg) {
370 case 0: /* Basic Control */
371 if (val & 0x8000) {
372 phy_reset(s);
373 } else {
374 s->phy_control = val & 0x7980;
375 /* Complete autonegotiation immediately. */
376 if (val & 0x1000) {
377 s->phy_status |= 0x0020;
378 }
379 }
380 break;
381 case 4: /* Auto-neg advertisement */
382 s->phy_advertise = (val & 0x2d7f) | 0x80;
383 break;
384 case 30: /* Interrupt mask */
385 s->phy_int_mask = val & 0xff;
386 phy_update_irq(s);
387 break;
388 case 17:
389 case 18:
390 case 27:
391 case 31:
392         qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
393 TYPE_IMX_FEC, __func__, reg);
394 break;
395 default:
396 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
397 TYPE_IMX_FEC, __func__, reg);
398 break;
399 }
400 }
401
402 static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
403 {
404 dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
405 }
406
407 static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
408 {
409 dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
410 }
411
412 static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
413 {
414 dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
415 }
416
417 static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
418 {
419 dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
420 }
421
422 static void imx_eth_update(IMXFECState *s)
423 {
424 /*
425      * Previous versions of QEMU had the ENET_INT_MAC and ENET_INT_TS_TIMER
426 * interrupts swapped. This worked with older versions of Linux (4.14
427 * and older) since Linux associated both interrupt lines with Ethernet
428 * MAC interrupts. Specifically,
429 * - Linux 4.15 and later have separate interrupt handlers for the MAC and
430 * timer interrupts. Those versions of Linux fail with versions of QEMU
431 * with swapped interrupt assignments.
432      * - In Linux 4.14, both interrupt lines were registered with the Ethernet
433      *   MAC interrupt handler. As a result, all versions of QEMU happen to
434 * work, though that is accidental.
435 * - In Linux 4.9 and older, the timer interrupt was registered directly
436 * with the Ethernet MAC interrupt handler. The MAC interrupt was
437 * redirected to a GPIO interrupt to work around erratum ERR006687.
438      *   This was implemented using the SoC's IOMUX block. In QEMU, this GPIO
439      *   interrupt never fired since IOMUX is currently not supported in QEMU.
440      *   Linux instead received MAC interrupts on the timer interrupt.
441      *   As a result, QEMU versions with the swapped interrupt assignment work,
442      *   albeit accidentally, but QEMU versions with the correct interrupt
443 * assignment fail.
444 *
445 * To ensure that all versions of Linux work, generate ENET_INT_MAC
446      * interrupts on both interrupt lines. This should be changed if and when
447      * QEMU supports IOMUX.
448 */
449 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
450 (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
451 qemu_set_irq(s->irq[1], 1);
452 } else {
453 qemu_set_irq(s->irq[1], 0);
454 }
455
456 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
457 qemu_set_irq(s->irq[0], 1);
458 } else {
459 qemu_set_irq(s->irq[0], 0);
460 }
461 }
462
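/*
 * Transmit using the legacy FEC buffer descriptor format: walk the ring
 * from tx_descriptor[0], gather each ready (ENET_BD_R) buffer into
 * s->frame, and send the frame once the last-buffer flag (ENET_BD_L) is
 * seen.  Ownership is handed back to the guest by clearing ENET_BD_R,
 * and ENET_BD_W wraps the walk back to ENET_TDSR.
 */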
463 static void imx_fec_do_tx(IMXFECState *s)
464 {
465 int frame_size = 0, descnt = 0;
466 uint8_t *ptr = s->frame;
467 uint32_t addr = s->tx_descriptor[0];
468
469 while (descnt++ < IMX_MAX_DESC) {
470 IMXFECBufDesc bd;
471 int len;
472
473 imx_fec_read_bd(&bd, addr);
474 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
475 addr, bd.flags, bd.length, bd.data);
476 if ((bd.flags & ENET_BD_R) == 0) {
477             /* Ran out of descriptors to transmit. */
478 FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
479 break;
480 }
481 len = bd.length;
482 if (frame_size + len > ENET_MAX_FRAME_SIZE) {
483 len = ENET_MAX_FRAME_SIZE - frame_size;
484 s->regs[ENET_EIR] |= ENET_INT_BABT;
485 }
486 dma_memory_read(&address_space_memory, bd.data, ptr, len);
487 ptr += len;
488 frame_size += len;
489 if (bd.flags & ENET_BD_L) {
490 /* Last buffer in frame. */
491 qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
492 ptr = s->frame;
493 frame_size = 0;
494 s->regs[ENET_EIR] |= ENET_INT_TXF;
495 }
496 s->regs[ENET_EIR] |= ENET_INT_TXB;
497 bd.flags &= ~ENET_BD_R;
498 /* Write back the modified descriptor. */
499 imx_fec_write_bd(&bd, addr);
500 /* Advance to the next descriptor. */
501 if ((bd.flags & ENET_BD_W) != 0) {
502 addr = s->regs[ENET_TDSR];
503 } else {
504 addr += sizeof(bd);
505 }
506 }
507
508 s->tx_descriptor[0] = addr;
509
510 imx_eth_update(s);
511 }
512
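/*
 * Transmit using the enhanced ENET buffer descriptor format.  The TDAR
 * register that was written selects the TX ring and the TXB/TXF
 * interrupt bits to raise.  On the last buffer of a frame, the
 * ENET_BD_PINS/ENET_BD_IINS descriptor options request protocol and IP
 * header checksum insertion before the frame is sent.
 */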
513 static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
514 {
515 int frame_size = 0, descnt = 0;
516
517 uint8_t *ptr = s->frame;
518 uint32_t addr, int_txb, int_txf, tdsr;
519 size_t ring;
520
521 switch (index) {
522 case ENET_TDAR:
523 ring = 0;
524 int_txb = ENET_INT_TXB;
525 int_txf = ENET_INT_TXF;
526 tdsr = ENET_TDSR;
527 break;
528 case ENET_TDAR1:
529 ring = 1;
530 int_txb = ENET_INT_TXB1;
531 int_txf = ENET_INT_TXF1;
532 tdsr = ENET_TDSR1;
533 break;
534 case ENET_TDAR2:
535 ring = 2;
536 int_txb = ENET_INT_TXB2;
537 int_txf = ENET_INT_TXF2;
538 tdsr = ENET_TDSR2;
539 break;
540 default:
541 qemu_log_mask(LOG_GUEST_ERROR,
542 "%s: bogus value for index %x\n",
543 __func__, index);
544 abort();
545 break;
546 }
547
548 addr = s->tx_descriptor[ring];
549
550 while (descnt++ < IMX_MAX_DESC) {
551 IMXENETBufDesc bd;
552 int len;
553
554 imx_enet_read_bd(&bd, addr);
555 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
556 "status %04x\n", addr, bd.flags, bd.length, bd.data,
557 bd.option, bd.status);
558 if ((bd.flags & ENET_BD_R) == 0) {
559             /* Ran out of descriptors to transmit. */
560 break;
561 }
562 len = bd.length;
563 if (frame_size + len > ENET_MAX_FRAME_SIZE) {
564 len = ENET_MAX_FRAME_SIZE - frame_size;
565 s->regs[ENET_EIR] |= ENET_INT_BABT;
566 }
567 dma_memory_read(&address_space_memory, bd.data, ptr, len);
568 ptr += len;
569 frame_size += len;
570 if (bd.flags & ENET_BD_L) {
571 if (bd.option & ENET_BD_PINS) {
572 struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
573 if (IP_HEADER_VERSION(ip_hd) == 4) {
574 net_checksum_calculate(s->frame, frame_size);
575 }
576 }
577 if (bd.option & ENET_BD_IINS) {
578 struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
579 /* We compute checksum only for IPv4 frames */
580 if (IP_HEADER_VERSION(ip_hd) == 4) {
581 uint16_t csum;
582 ip_hd->ip_sum = 0;
583 csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
584 ip_hd->ip_sum = cpu_to_be16(csum);
585 }
586 }
587 /* Last buffer in frame. */
588
589 qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
590 ptr = s->frame;
591
592 frame_size = 0;
593 if (bd.option & ENET_BD_TX_INT) {
594 s->regs[ENET_EIR] |= int_txf;
595 }
596 }
597 if (bd.option & ENET_BD_TX_INT) {
598 s->regs[ENET_EIR] |= int_txb;
599 }
600 bd.flags &= ~ENET_BD_R;
601 /* Write back the modified descriptor. */
602 imx_enet_write_bd(&bd, addr);
603 /* Advance to the next descriptor. */
604 if ((bd.flags & ENET_BD_W) != 0) {
605 addr = s->regs[tdsr];
606 } else {
607 addr += sizeof(bd);
608 }
609 }
610
611 s->tx_descriptor[ring] = addr;
612
613 imx_eth_update(s);
614 }
615
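/*
 * ENET_ECR_EN1588 selects the enhanced (ENET) descriptor format; the
 * legacy FEC format is used otherwise, even on ENET instances.
 */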
616 static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
617 {
618 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
619 imx_enet_do_tx(s, index);
620 } else {
621 imx_fec_do_tx(s);
622 }
623 }
624
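/*
 * RDAR mirrors whether the current RX descriptor is empty (ENET_BD_E).
 * When receive becomes possible again, optionally flush packets that
 * were queued while no descriptor was available.
 */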
625 static void imx_eth_enable_rx(IMXFECState *s, bool flush)
626 {
627 IMXFECBufDesc bd;
628
629 imx_fec_read_bd(&bd, s->rx_descriptor);
630
631 s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
632
633 if (!s->regs[ENET_RDAR]) {
634 FEC_PRINTF("RX buffer full\n");
635 } else if (flush) {
636 qemu_flush_queued_packets(qemu_get_queue(s->nic));
637 }
638 }
639
640 static void imx_eth_reset(DeviceState *d)
641 {
642 IMXFECState *s = IMX_FEC(d);
643
644 /* Reset the Device */
645 memset(s->regs, 0, sizeof(s->regs));
646 s->regs[ENET_ECR] = 0xf0000000;
647 s->regs[ENET_MIBC] = 0xc0000000;
648 s->regs[ENET_RCR] = 0x05ee0001;
649 s->regs[ENET_OPD] = 0x00010000;
650
651 s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
652 | (s->conf.macaddr.a[1] << 16)
653 | (s->conf.macaddr.a[2] << 8)
654 | s->conf.macaddr.a[3];
655 s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
656 | (s->conf.macaddr.a[5] << 16)
657 | 0x8808;
658
659 if (s->is_fec) {
660 s->regs[ENET_FRBR] = 0x00000600;
661 s->regs[ENET_FRSR] = 0x00000500;
662 s->regs[ENET_MIIGSK_ENR] = 0x00000006;
663 } else {
664 s->regs[ENET_RAEM] = 0x00000004;
665 s->regs[ENET_RAFL] = 0x00000004;
666 s->regs[ENET_TAEM] = 0x00000004;
667 s->regs[ENET_TAFL] = 0x00000008;
668 s->regs[ENET_TIPG] = 0x0000000c;
669 s->regs[ENET_FTRL] = 0x000007ff;
670 s->regs[ENET_ATPER] = 0x3b9aca00;
671 }
672
673 s->rx_descriptor = 0;
674 memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
675
676 /* We also reset the PHY */
677 phy_reset(s);
678 }
679
680 static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
681 {
682 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
683 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
684 return 0;
685 }
686
687 static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
688 {
689 switch (index) {
690 case ENET_FRBR:
691 case ENET_FRSR:
692 case ENET_MIIGSK_CFGR:
693 case ENET_MIIGSK_ENR:
694 return s->regs[index];
695 default:
696 return imx_default_read(s, index);
697 }
698 }
699
700 static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
701 {
702 switch (index) {
703 case ENET_RSFL:
704 case ENET_RSEM:
705 case ENET_RAEM:
706 case ENET_RAFL:
707 case ENET_TSEM:
708 case ENET_TAEM:
709 case ENET_TAFL:
710 case ENET_TIPG:
711 case ENET_FTRL:
712 case ENET_TACC:
713 case ENET_RACC:
714 case ENET_ATCR:
715 case ENET_ATVR:
716 case ENET_ATOFF:
717 case ENET_ATPER:
718 case ENET_ATCOR:
719 case ENET_ATINC:
720 case ENET_ATSTMP:
721 case ENET_TGSR:
722 case ENET_TCSR0:
723 case ENET_TCCR0:
724 case ENET_TCSR1:
725 case ENET_TCCR1:
726 case ENET_TCSR2:
727 case ENET_TCCR2:
728 case ENET_TCSR3:
729 case ENET_TCCR3:
730 return s->regs[index];
731 default:
732 return imx_default_read(s, index);
733 }
734 }
735
736 static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
737 {
738 uint32_t value = 0;
739 IMXFECState *s = IMX_FEC(opaque);
740 uint32_t index = offset >> 2;
741
742 switch (index) {
743 case ENET_EIR:
744 case ENET_EIMR:
745 case ENET_RDAR:
746 case ENET_TDAR:
747 case ENET_ECR:
748 case ENET_MMFR:
749 case ENET_MSCR:
750 case ENET_MIBC:
751 case ENET_RCR:
752 case ENET_TCR:
753 case ENET_PALR:
754 case ENET_PAUR:
755 case ENET_OPD:
756 case ENET_IAUR:
757 case ENET_IALR:
758 case ENET_GAUR:
759 case ENET_GALR:
760 case ENET_TFWR:
761 case ENET_RDSR:
762 case ENET_TDSR:
763 case ENET_MRBR:
764 value = s->regs[index];
765 break;
766 default:
767 if (s->is_fec) {
768 value = imx_fec_read(s, index);
769 } else {
770 value = imx_enet_read(s, index);
771 }
772 break;
773 }
774
775 FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
776 value);
777
778 return value;
779 }
780
781 static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
782 {
783 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
784 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
785 return;
786 }
787
788 static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
789 {
790 switch (index) {
791 case ENET_FRBR:
792 /* FRBR is read only */
793 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
794 TYPE_IMX_FEC, __func__);
795 break;
796 case ENET_FRSR:
797 s->regs[index] = (value & 0x000003fc) | 0x00000400;
798 break;
799 case ENET_MIIGSK_CFGR:
800 s->regs[index] = value & 0x00000053;
801 break;
802 case ENET_MIIGSK_ENR:
803 s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
804 break;
805 default:
806 imx_default_write(s, index, value);
807 break;
808 }
809 }
810
811 static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
812 {
813 switch (index) {
814 case ENET_RSFL:
815 case ENET_RSEM:
816 case ENET_RAEM:
817 case ENET_RAFL:
818 case ENET_TSEM:
819 case ENET_TAEM:
820 case ENET_TAFL:
821 s->regs[index] = value & 0x000001ff;
822 break;
823 case ENET_TIPG:
824 s->regs[index] = value & 0x0000001f;
825 break;
826 case ENET_FTRL:
827 s->regs[index] = value & 0x00003fff;
828 break;
829 case ENET_TACC:
830 s->regs[index] = value & 0x00000019;
831 break;
832 case ENET_RACC:
833 s->regs[index] = value & 0x000000C7;
834 break;
835 case ENET_ATCR:
836 s->regs[index] = value & 0x00002a9d;
837 break;
838 case ENET_ATVR:
839 case ENET_ATOFF:
840 case ENET_ATPER:
841 s->regs[index] = value;
842 break;
843 case ENET_ATSTMP:
844 /* ATSTMP is read only */
845 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
846 TYPE_IMX_FEC, __func__);
847 break;
848 case ENET_ATCOR:
849 s->regs[index] = value & 0x7fffffff;
850 break;
851 case ENET_ATINC:
852 s->regs[index] = value & 0x00007f7f;
853 break;
854 case ENET_TGSR:
855 /* implement clear timer flag */
856 value = value & 0x0000000f;
857 break;
858 case ENET_TCSR0:
859 case ENET_TCSR1:
860 case ENET_TCSR2:
861 case ENET_TCSR3:
862 value = value & 0x000000fd;
863 break;
864 case ENET_TCCR0:
865 case ENET_TCCR1:
866 case ENET_TCCR2:
867 case ENET_TCCR3:
868 s->regs[index] = value;
869 break;
870 default:
871 imx_default_write(s, index, value);
872 break;
873 }
874 }
875
876 static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
877 unsigned size)
878 {
879 IMXFECState *s = IMX_FEC(opaque);
880 const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
881 uint32_t index = offset >> 2;
882
883 FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
884 (uint32_t)value);
885
886 switch (index) {
887 case ENET_EIR:
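        /* EIR is write-one-to-clear: writing a 1 clears the corresponding event bit */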
888 s->regs[index] &= ~value;
889 break;
890 case ENET_EIMR:
891 s->regs[index] = value;
892 break;
893 case ENET_RDAR:
894 if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
895 if (!s->regs[index]) {
896 imx_eth_enable_rx(s, true);
897 }
898 } else {
899 s->regs[index] = 0;
900 }
901 break;
902 case ENET_TDAR1: /* FALLTHROUGH */
903 case ENET_TDAR2: /* FALLTHROUGH */
904 if (unlikely(single_tx_ring)) {
905 qemu_log_mask(LOG_GUEST_ERROR,
906 "[%s]%s: trying to access TDAR2 or TDAR1\n",
907 TYPE_IMX_FEC, __func__);
908 return;
909 }
910 case ENET_TDAR: /* FALLTHROUGH */
911 if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
912 s->regs[index] = ENET_TDAR_TDAR;
913 imx_eth_do_tx(s, index);
914 }
915 s->regs[index] = 0;
916 break;
917 case ENET_ECR:
918 if (value & ENET_ECR_RESET) {
919 return imx_eth_reset(DEVICE(s));
920 }
921 s->regs[index] = value;
922 if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
923 s->regs[ENET_RDAR] = 0;
924 s->rx_descriptor = s->regs[ENET_RDSR];
925 s->regs[ENET_TDAR] = 0;
926 s->regs[ENET_TDAR1] = 0;
927 s->regs[ENET_TDAR2] = 0;
928 s->tx_descriptor[0] = s->regs[ENET_TDSR];
929 s->tx_descriptor[1] = s->regs[ENET_TDSR1];
930 s->tx_descriptor[2] = s->regs[ENET_TDSR2];
931 }
932 break;
933 case ENET_MMFR:
934 s->regs[index] = value;
935 if (extract32(value, 29, 1)) {
936 /* This is a read operation */
937 s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
938 do_phy_read(s,
939 extract32(value,
940 18, 10)));
941 } else {
942             /* This is a write operation */
943 do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
944 }
945 /* raise the interrupt as the PHY operation is done */
946 s->regs[ENET_EIR] |= ENET_INT_MII;
947 break;
948 case ENET_MSCR:
949 s->regs[index] = value & 0xfe;
950 break;
951 case ENET_MIBC:
952 /* TODO: Implement MIB. */
953 s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
954 break;
955 case ENET_RCR:
956 s->regs[index] = value & 0x07ff003f;
957 /* TODO: Implement LOOP mode. */
958 break;
959 case ENET_TCR:
960 /* We transmit immediately, so raise GRA immediately. */
961 s->regs[index] = value;
962 if (value & 1) {
963 s->regs[ENET_EIR] |= ENET_INT_GRA;
964 }
965 break;
966 case ENET_PALR:
967 s->regs[index] = value;
968 s->conf.macaddr.a[0] = value >> 24;
969 s->conf.macaddr.a[1] = value >> 16;
970 s->conf.macaddr.a[2] = value >> 8;
971 s->conf.macaddr.a[3] = value;
972 break;
973 case ENET_PAUR:
974 s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
975 s->conf.macaddr.a[4] = value >> 24;
976 s->conf.macaddr.a[5] = value >> 16;
977 break;
978 case ENET_OPD:
979 s->regs[index] = (value & 0x0000ffff) | 0x00010000;
980 break;
981 case ENET_IAUR:
982 case ENET_IALR:
983 case ENET_GAUR:
984 case ENET_GALR:
985 /* TODO: implement MAC hash filtering. */
986 break;
987 case ENET_TFWR:
988 if (s->is_fec) {
989 s->regs[index] = value & 0x3;
990 } else {
991 s->regs[index] = value & 0x13f;
992 }
993 break;
994 case ENET_RDSR:
995 if (s->is_fec) {
996 s->regs[index] = value & ~3;
997 } else {
998 s->regs[index] = value & ~7;
999 }
1000 s->rx_descriptor = s->regs[index];
1001 break;
1002 case ENET_TDSR:
1003 if (s->is_fec) {
1004 s->regs[index] = value & ~3;
1005 } else {
1006 s->regs[index] = value & ~7;
1007 }
1008 s->tx_descriptor[0] = s->regs[index];
1009 break;
1010 case ENET_TDSR1:
1011 if (unlikely(single_tx_ring)) {
1012 qemu_log_mask(LOG_GUEST_ERROR,
1013 "[%s]%s: trying to access TDSR1\n",
1014 TYPE_IMX_FEC, __func__);
1015 return;
1016 }
1017
1018 s->regs[index] = value & ~7;
1019 s->tx_descriptor[1] = s->regs[index];
1020 break;
1021 case ENET_TDSR2:
1022 if (unlikely(single_tx_ring)) {
1023 qemu_log_mask(LOG_GUEST_ERROR,
1024 "[%s]%s: trying to access TDSR2\n",
1025 TYPE_IMX_FEC, __func__);
1026 return;
1027 }
1028
1029 s->regs[index] = value & ~7;
1030 s->tx_descriptor[2] = s->regs[index];
1031 break;
1032 case ENET_MRBR:
1033 s->regs[index] = value & 0x00003ff0;
1034 break;
1035 default:
1036 if (s->is_fec) {
1037 imx_fec_write(s, index, value);
1038 } else {
1039 imx_enet_write(s, index, value);
1040 }
1041 return;
1042 }
1043
1044 imx_eth_update(s);
1045 }
1046
1047 static int imx_eth_can_receive(NetClientState *nc)
1048 {
1049 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1050
1051 FEC_PRINTF("\n");
1052
1053 return !!s->regs[ENET_RDAR];
1054 }
1055
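/*
 * Receive path for the legacy FEC descriptor format.  The frame is
 * scattered across RX descriptors of at most ENET_MRBR bytes each, a
 * 4-byte CRC is appended, and oversized frames set the TR/LG error
 * flags in the closing descriptor.
 */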
1056 static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
1057 size_t len)
1058 {
1059 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1060 IMXFECBufDesc bd;
1061 uint32_t flags = 0;
1062 uint32_t addr;
1063 uint32_t crc;
1064 uint32_t buf_addr;
1065 uint8_t *crc_ptr;
1066 unsigned int buf_len;
1067 size_t size = len;
1068
1069 FEC_PRINTF("len %d\n", (int)size);
1070
1071 if (!s->regs[ENET_RDAR]) {
1072 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1073 TYPE_IMX_FEC, __func__);
1074 return 0;
1075 }
1076
1077 /* 4 bytes for the CRC. */
1078 size += 4;
1079 crc = cpu_to_be32(crc32(~0, buf, size));
1080 crc_ptr = (uint8_t *) &crc;
1081
1082 /* Huge frames are truncated. */
1083 if (size > ENET_MAX_FRAME_SIZE) {
1084 size = ENET_MAX_FRAME_SIZE;
1085 flags |= ENET_BD_TR | ENET_BD_LG;
1086 }
1087
1088 /* Frames larger than the user limit just set error flags. */
1089 if (size > (s->regs[ENET_RCR] >> 16)) {
1090 flags |= ENET_BD_LG;
1091 }
1092
1093 addr = s->rx_descriptor;
1094 while (size > 0) {
1095 imx_fec_read_bd(&bd, addr);
1096 if ((bd.flags & ENET_BD_E) == 0) {
1097 /* No descriptors available. Bail out. */
1098 /*
1099 * FIXME: This is wrong. We should probably either
1100 * save the remainder for when more RX buffers are
1101 * available, or flag an error.
1102 */
1103 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1104 TYPE_IMX_FEC, __func__);
1105 break;
1106 }
1107 buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
1108 bd.length = buf_len;
1109 size -= buf_len;
1110
1111 FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
1112
1113 /* The last 4 bytes are the CRC. */
1114 if (size < 4) {
1115 buf_len += size - 4;
1116 }
1117 buf_addr = bd.data;
1118 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1119 buf += buf_len;
1120 if (size < 4) {
1121 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1122 crc_ptr, 4 - size);
1123 crc_ptr += 4 - size;
1124 }
1125 bd.flags &= ~ENET_BD_E;
1126 if (size == 0) {
1127 /* Last buffer in frame. */
1128 bd.flags |= flags | ENET_BD_L;
1129 FEC_PRINTF("rx frame flags %04x\n", bd.flags);
1130 s->regs[ENET_EIR] |= ENET_INT_RXF;
1131 } else {
1132 s->regs[ENET_EIR] |= ENET_INT_RXB;
1133 }
1134 imx_fec_write_bd(&bd, addr);
1135 /* Advance to the next descriptor. */
1136 if ((bd.flags & ENET_BD_W) != 0) {
1137 addr = s->regs[ENET_RDSR];
1138 } else {
1139 addr += sizeof(bd);
1140 }
1141 }
1142 s->rx_descriptor = addr;
1143 imx_eth_enable_rx(s, false);
1144 imx_eth_update(s);
1145 return len;
1146 }
1147
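/*
 * Receive path for the enhanced ENET descriptor format.  In addition to
 * the FEC behaviour, RACC[SHIFT16] prepends two zero bytes to the first
 * buffer so the IP payload ends up 4-byte aligned, and RX interrupts are
 * only raised when the descriptor asks for them (ENET_BD_RX_INT).
 */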
1148 static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
1149 size_t len)
1150 {
1151 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1152 IMXENETBufDesc bd;
1153 uint32_t flags = 0;
1154 uint32_t addr;
1155 uint32_t crc;
1156 uint32_t buf_addr;
1157 uint8_t *crc_ptr;
1158 unsigned int buf_len;
1159 size_t size = len;
1160 bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;
1161
1162 FEC_PRINTF("len %d\n", (int)size);
1163
1164 if (!s->regs[ENET_RDAR]) {
1165 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1166 TYPE_IMX_FEC, __func__);
1167 return 0;
1168 }
1169
1170 /* 4 bytes for the CRC. */
1171 size += 4;
1172 crc = cpu_to_be32(crc32(~0, buf, size));
1173 crc_ptr = (uint8_t *) &crc;
1174
1175 if (shift16) {
1176 size += 2;
1177 }
1178
1179 /* Huge frames are truncated. */
1180 if (size > s->regs[ENET_FTRL]) {
1181 size = s->regs[ENET_FTRL];
1182 flags |= ENET_BD_TR | ENET_BD_LG;
1183 }
1184
1185 /* Frames larger than the user limit just set error flags. */
1186 if (size > (s->regs[ENET_RCR] >> 16)) {
1187 flags |= ENET_BD_LG;
1188 }
1189
1190 addr = s->rx_descriptor;
1191 while (size > 0) {
1192 imx_enet_read_bd(&bd, addr);
1193 if ((bd.flags & ENET_BD_E) == 0) {
1194 /* No descriptors available. Bail out. */
1195 /*
1196 * FIXME: This is wrong. We should probably either
1197 * save the remainder for when more RX buffers are
1198 * available, or flag an error.
1199 */
1200 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1201 TYPE_IMX_FEC, __func__);
1202 break;
1203 }
1204 buf_len = MIN(size, s->regs[ENET_MRBR]);
1205 bd.length = buf_len;
1206 size -= buf_len;
1207
1208 FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
1209
1210 /* The last 4 bytes are the CRC. */
1211 if (size < 4) {
1212 buf_len += size - 4;
1213 }
1214 buf_addr = bd.data;
1215
1216 if (shift16) {
1217 /*
1218 * If SHIFT16 bit of ENETx_RACC register is set we need to
1219 * align the payload to 4-byte boundary.
1220 */
1221 const uint8_t zeros[2] = { 0 };
1222
1223 dma_memory_write(&address_space_memory, buf_addr,
1224 zeros, sizeof(zeros));
1225
1226 buf_addr += sizeof(zeros);
1227 buf_len -= sizeof(zeros);
1228
1229 /* We only do this once per Ethernet frame */
1230 shift16 = false;
1231 }
1232
1233 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1234 buf += buf_len;
1235 if (size < 4) {
1236 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1237 crc_ptr, 4 - size);
1238 crc_ptr += 4 - size;
1239 }
1240 bd.flags &= ~ENET_BD_E;
1241 if (size == 0) {
1242 /* Last buffer in frame. */
1243 bd.flags |= flags | ENET_BD_L;
1244 FEC_PRINTF("rx frame flags %04x\n", bd.flags);
1245 if (bd.option & ENET_BD_RX_INT) {
1246 s->regs[ENET_EIR] |= ENET_INT_RXF;
1247 }
1248 } else {
1249 if (bd.option & ENET_BD_RX_INT) {
1250 s->regs[ENET_EIR] |= ENET_INT_RXB;
1251 }
1252 }
1253 imx_enet_write_bd(&bd, addr);
1254 /* Advance to the next descriptor. */
1255 if ((bd.flags & ENET_BD_W) != 0) {
1256 addr = s->regs[ENET_RDSR];
1257 } else {
1258 addr += sizeof(bd);
1259 }
1260 }
1261 s->rx_descriptor = addr;
1262 imx_eth_enable_rx(s, false);
1263 imx_eth_update(s);
1264 return len;
1265 }
1266
1267 static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1268 size_t len)
1269 {
1270 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1271
1272 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1273 return imx_enet_receive(nc, buf, len);
1274 } else {
1275 return imx_fec_receive(nc, buf, len);
1276 }
1277 }
1278
1279 static const MemoryRegionOps imx_eth_ops = {
1280 .read = imx_eth_read,
1281 .write = imx_eth_write,
1282 .valid.min_access_size = 4,
1283 .valid.max_access_size = 4,
1284 .endianness = DEVICE_NATIVE_ENDIAN,
1285 };
1286
1287 static void imx_eth_cleanup(NetClientState *nc)
1288 {
1289 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1290
1291 s->nic = NULL;
1292 }
1293
1294 static NetClientInfo imx_eth_net_info = {
1295 .type = NET_CLIENT_DRIVER_NIC,
1296 .size = sizeof(NICState),
1297 .can_receive = imx_eth_can_receive,
1298 .receive = imx_eth_receive,
1299 .cleanup = imx_eth_cleanup,
1300 .link_status_changed = imx_eth_set_link,
1301 };
1302
1303
1304 static void imx_eth_realize(DeviceState *dev, Error **errp)
1305 {
1306 IMXFECState *s = IMX_FEC(dev);
1307 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1308
1309 memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
1310 TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
1311 sysbus_init_mmio(sbd, &s->iomem);
1312 sysbus_init_irq(sbd, &s->irq[0]);
1313 sysbus_init_irq(sbd, &s->irq[1]);
1314
1315 qemu_macaddr_default_if_unset(&s->conf.macaddr);
1316
1317 s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
1318 object_get_typename(OBJECT(dev)),
1319 DEVICE(dev)->id, s);
1320
1321 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
1322 }
1323
1324 static Property imx_eth_properties[] = {
1325 DEFINE_NIC_PROPERTIES(IMXFECState, conf),
1326 DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
1327 DEFINE_PROP_END_OF_LIST(),
1328 };
1329
1330 static void imx_eth_class_init(ObjectClass *klass, void *data)
1331 {
1332 DeviceClass *dc = DEVICE_CLASS(klass);
1333
1334 dc->vmsd = &vmstate_imx_eth;
1335 dc->reset = imx_eth_reset;
1336 dc->props = imx_eth_properties;
1337 dc->realize = imx_eth_realize;
1338 dc->desc = "i.MX FEC/ENET Ethernet Controller";
1339 }
1340
1341 static void imx_fec_init(Object *obj)
1342 {
1343 IMXFECState *s = IMX_FEC(obj);
1344
1345 s->is_fec = true;
1346 }
1347
1348 static void imx_enet_init(Object *obj)
1349 {
1350 IMXFECState *s = IMX_FEC(obj);
1351
1352 s->is_fec = false;
1353 }
1354
1355 static const TypeInfo imx_fec_info = {
1356 .name = TYPE_IMX_FEC,
1357 .parent = TYPE_SYS_BUS_DEVICE,
1358 .instance_size = sizeof(IMXFECState),
1359 .instance_init = imx_fec_init,
1360 .class_init = imx_eth_class_init,
1361 };
1362
1363 static const TypeInfo imx_enet_info = {
1364 .name = TYPE_IMX_ENET,
1365 .parent = TYPE_IMX_FEC,
1366 .instance_init = imx_enet_init,
1367 };
1368
1369 static void imx_eth_register_types(void)
1370 {
1371 type_register_static(&imx_fec_info);
1372 type_register_static(&imx_enet_info);
1373 }
1374
1375 type_init(imx_eth_register_types)