1 /*
2 * QEMU model of Xilinx AXI-Ethernet.
3 *
4 * Copyright (c) 2011 Edgar E. Iglesias.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
27 #include "hw/sysbus.h"
28 #include "qapi/error.h"
29 #include "qemu/log.h"
30 #include "qemu/module.h"
31 #include "net/net.h"
32 #include "net/checksum.h"
33
34 #include "hw/hw.h"
35 #include "hw/irq.h"
36 #include "hw/qdev-properties.h"
37 #include "hw/stream.h"
38 #include "qom/object.h"
39
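/* Debug trace helpers: DPHY() (and DENET() further down) expand to nothing;
   redefine them to pass their argument through to enable the qemu_log()
   trace statements they wrap. */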
40 #define DPHY(x)
41
42 #define TYPE_XILINX_AXI_ENET "xlnx.axi-ethernet"
43 #define TYPE_XILINX_AXI_ENET_DATA_STREAM "xilinx-axienet-data-stream"
44 #define TYPE_XILINX_AXI_ENET_CONTROL_STREAM "xilinx-axienet-control-stream"
45
46 typedef struct XilinxAXIEnet XilinxAXIEnet;
47 #define XILINX_AXI_ENET(obj) \
48 OBJECT_CHECK(XilinxAXIEnet, (obj), TYPE_XILINX_AXI_ENET)
49
50 typedef struct XilinxAXIEnetStreamSlave XilinxAXIEnetStreamSlave;
51 #define XILINX_AXI_ENET_DATA_STREAM(obj) \
52 OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
53 TYPE_XILINX_AXI_ENET_DATA_STREAM)
54
55 #define XILINX_AXI_ENET_CONTROL_STREAM(obj) \
56 OBJECT_CHECK(XilinxAXIEnetStreamSlave, (obj),\
57 TYPE_XILINX_AXI_ENET_CONTROL_STREAM)
58
59 /* Advertisement control register. */
60 #define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
61 #define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
62 #define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
63
64 #define CONTROL_PAYLOAD_WORDS 5
65 #define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))
66
67 struct PHY {
68 uint32_t regs[32];
69
70 int link;
71
72 unsigned int (*read)(struct PHY *phy, unsigned int req);
73 void (*write)(struct PHY *phy, unsigned int req,
74 unsigned int data);
75 };
76
77 static unsigned int tdk_read(struct PHY *phy, unsigned int req)
78 {
79 int regnum;
80 unsigned r = 0;
81
82 regnum = req & 0x1f;
83
84 switch (regnum) {
85 case 1:
86 if (!phy->link) {
87 break;
88 }
89 /* MR1. */
90 /* Speeds and modes. */
91 r |= (1 << 13) | (1 << 14);
92 r |= (1 << 11) | (1 << 12);
93 r |= (1 << 5); /* Autoneg complete. */
94 r |= (1 << 3); /* Autoneg able. */
95 r |= (1 << 2); /* link. */
96 r |= (1 << 1); /* link. */
97 break;
98 case 5:
99 /* Link partner ability.
100 We are kind; always agree with whatever best mode
101 the guest advertises. */
102 r = 1 << 14; /* Success. */
103 /* Copy advertised modes. */
104 r |= phy->regs[4] & (15 << 5);
105 /* Autoneg support. */
106 r |= 1;
107 break;
108 case 17:
109 /* Marvell PHY on many xilinx boards. */
110 r = 0x8000; /* 1000Mb */
111 break;
112 case 18:
113 {
114 /* Diagnostics reg. */
115 int duplex = 0;
116 int speed_100 = 0;
117
118 if (!phy->link) {
119 break;
120 }
121
122 /* Are we advertising 100 half or 100 full duplex ? */
123 speed_100 = !!(phy->regs[4] & ADVERTISE_100HALF);
124 speed_100 |= !!(phy->regs[4] & ADVERTISE_100FULL);
125
126 /* Are we advertising 10 full or 100 full duplex ? */
127 duplex = !!(phy->regs[4] & ADVERTISE_100FULL);
128 duplex |= !!(phy->regs[4] & ADVERTISE_10FULL);
129 r = (speed_100 << 10) | (duplex << 11);
130 }
131 break;
132
133 default:
134 r = phy->regs[regnum];
135 break;
136 }
137 DPHY(qemu_log("\n%s %x = reg[%d]\n", __func__, r, regnum));
138 return r;
139 }
140
141 static void
142 tdk_write(struct PHY *phy, unsigned int req, unsigned int data)
143 {
144 int regnum;
145
146 regnum = req & 0x1f;
147 DPHY(qemu_log("%s reg[%d] = %x\n", __func__, regnum, data));
148 switch (regnum) {
149 default:
150 phy->regs[regnum] = data;
151 break;
152 }
153
154 /* Unconditionally clear the self-clearing bits: BMCR reset and auto-neg restart. */
155 phy->regs[0] &= ~0x8200;
156 }
157
158 static void
159 tdk_init(struct PHY *phy)
160 {
161 phy->regs[0] = 0x3100;
162 /* PHY Id. */
163 phy->regs[2] = 0x0300;
164 phy->regs[3] = 0xe400;
165 /* Autonegotiation advertisement reg. */
166 phy->regs[4] = 0x01E1;
167 phy->link = 1;
168
169 phy->read = tdk_read;
170 phy->write = tdk_write;
171 }
172
173 struct MDIOBus {
174 struct PHY *devs[32];
175 };
176
177 static void
178 mdio_attach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
179 {
180 bus->devs[addr & 0x1f] = phy;
181 }
182
183 #ifdef USE_THIS_DEAD_CODE
184 static void
185 mdio_detach(struct MDIOBus *bus, struct PHY *phy, unsigned int addr)
186 {
187 bus->devs[addr & 0x1f] = NULL;
188 }
189 #endif
190
191 static uint16_t mdio_read_req(struct MDIOBus *bus, unsigned int addr,
192 unsigned int reg)
193 {
194 struct PHY *phy;
195 uint16_t data;
196
197 phy = bus->devs[addr];
198 if (phy && phy->read) {
199 data = phy->read(phy, reg);
200 } else {
201 data = 0xffff;
202 }
203 DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
204 return data;
205 }
206
207 static void mdio_write_req(struct MDIOBus *bus, unsigned int addr,
208 unsigned int reg, uint16_t data)
209 {
210 struct PHY *phy;
211
212 DPHY(qemu_log("%s addr=%d reg=%d data=%x\n", __func__, addr, reg, data));
213 phy = bus->devs[addr];
214 if (phy && phy->write) {
215 phy->write(phy, reg, data);
216 }
217 }
218
219 #define DENET(x)
220
221 #define R_RAF (0x000 / 4)
222 enum {
223 RAF_MCAST_REJ = (1 << 1),
224 RAF_BCAST_REJ = (1 << 2),
225 RAF_EMCF_EN = (1 << 12),
226 RAF_NEWFUNC_EN = (1 << 11)
227 };
228
229 #define R_IS (0x00C / 4)
230 enum {
231 IS_HARD_ACCESS_COMPLETE = 1,
232 IS_AUTONEG = (1 << 1),
233 IS_RX_COMPLETE = (1 << 2),
234 IS_RX_REJECT = (1 << 3),
235 IS_TX_COMPLETE = (1 << 5),
236 IS_RX_DCM_LOCK = (1 << 6),
237 IS_MGM_RDY = (1 << 7),
238 IS_PHY_RST_DONE = (1 << 8),
239 };
240
241 #define R_IP (0x010 / 4)
242 #define R_IE (0x014 / 4)
243 #define R_UAWL (0x020 / 4)
244 #define R_UAWU (0x024 / 4)
245 #define R_PPST (0x030 / 4)
246 enum {
247 PPST_LINKSTATUS = (1 << 0),
248 PPST_PHY_LINKSTATUS = (1 << 7),
249 };
250
251 #define R_STATS_RX_BYTESL (0x200 / 4)
252 #define R_STATS_RX_BYTESH (0x204 / 4)
253 #define R_STATS_TX_BYTESL (0x208 / 4)
254 #define R_STATS_TX_BYTESH (0x20C / 4)
255 #define R_STATS_RXL (0x290 / 4)
256 #define R_STATS_RXH (0x294 / 4)
257 #define R_STATS_RX_BCASTL (0x2a0 / 4)
258 #define R_STATS_RX_BCASTH (0x2a4 / 4)
259 #define R_STATS_RX_MCASTL (0x2a8 / 4)
260 #define R_STATS_RX_MCASTH (0x2ac / 4)
261
262 #define R_RCW0 (0x400 / 4)
263 #define R_RCW1 (0x404 / 4)
264 enum {
265 RCW1_VLAN = (1 << 27),
266 RCW1_RX = (1 << 28),
267 RCW1_FCS = (1 << 29),
268 RCW1_JUM = (1 << 30),
269 RCW1_RST = (1 << 31),
270 };
271
272 #define R_TC (0x408 / 4)
273 enum {
274 TC_VLAN = (1 << 27),
275 TC_TX = (1 << 28),
276 TC_FCS = (1 << 29),
277 TC_JUM = (1 << 30),
278 TC_RST = (1 << 31),
279 };
280
281 #define R_EMMC (0x410 / 4)
282 enum {
283 EMMC_LINKSPEED_10MB = (0 << 30),
284 EMMC_LINKSPEED_100MB = (1 << 30),
285 EMMC_LINKSPEED_1000MB = (2 << 30),
286 };
287
288 #define R_PHYC (0x414 / 4)
289
290 #define R_MC (0x500 / 4)
291 #define MC_EN (1 << 6)
292
293 #define R_MCR (0x504 / 4)
294 #define R_MWD (0x508 / 4)
295 #define R_MRD (0x50c / 4)
296 #define R_MIS (0x600 / 4)
297 #define R_MIP (0x620 / 4)
298 #define R_MIE (0x640 / 4)
299 #define R_MIC (0x640 / 4)
300
301 #define R_UAW0 (0x700 / 4)
302 #define R_UAW1 (0x704 / 4)
303 #define R_FMI (0x708 / 4)
304 #define R_AF0 (0x710 / 4)
305 #define R_AF1 (0x714 / 4)
306 #define R_MAX (0x34 / 4)
307
308 /* Indirect registers. */
309 struct TEMAC {
310 struct MDIOBus mdio_bus;
311 struct PHY phy;
312
313 void *parent;
314 };
315
316
317 struct XilinxAXIEnetStreamSlave {
318 Object parent;
319
320 struct XilinxAXIEnet *enet;
321 };
322
323 struct XilinxAXIEnet {
324 SysBusDevice busdev;
325 MemoryRegion iomem;
326 qemu_irq irq;
327 StreamSlave *tx_data_dev;
328 StreamSlave *tx_control_dev;
329 XilinxAXIEnetStreamSlave rx_data_dev;
330 XilinxAXIEnetStreamSlave rx_control_dev;
331 NICState *nic;
332 NICConf conf;
333
334
335 uint32_t c_rxmem;
336 uint32_t c_txmem;
337 uint32_t c_phyaddr;
338
339 struct TEMAC TEMAC;
340
341 /* MII regs. */
342 union {
343 uint32_t regs[4];
344 struct {
345 uint32_t mc;
346 uint32_t mcr;
347 uint32_t mwd;
348 uint32_t mrd;
349 };
350 } mii;
351
352 struct {
353 uint64_t rx_bytes;
354 uint64_t tx_bytes;
355
356 uint64_t rx;
357 uint64_t rx_bcast;
358 uint64_t rx_mcast;
359 } stats;
360
361 /* Receive configuration words. */
362 uint32_t rcw[2];
363 /* Transmit config. */
364 uint32_t tc;
365 uint32_t emmc;
366 uint32_t phyc;
367
368 /* Unicast Address Word. */
369 uint32_t uaw[2];
370 /* Unicast address filter used with extended mcast. */
371 uint32_t ext_uaw[2];
372 uint32_t fmi;
373
374 uint32_t regs[R_MAX];
375
376 /* Multicast filter addrs. */
377 uint32_t maddr[4][2];
378 /* 32K x 1 lookup filter. */
379 uint32_t ext_mtable[1024];
380
381 uint32_t hdr[CONTROL_PAYLOAD_WORDS];
382
383 uint8_t *txmem;
384 uint32_t txpos;
385
386 uint8_t *rxmem;
387 uint32_t rxsize;
388 uint32_t rxpos;
389
390 uint8_t rxapp[CONTROL_PAYLOAD_SIZE];
391 uint32_t rxappsize;
392
393 /* Whether axienet_eth_rx_notify should flush incoming queue. */
394 bool need_flush;
395 };
396
397 static void axienet_rx_reset(XilinxAXIEnet *s)
398 {
399 s->rcw[1] = RCW1_JUM | RCW1_FCS | RCW1_RX | RCW1_VLAN;
400 }
401
402 static void axienet_tx_reset(XilinxAXIEnet *s)
403 {
404 s->tc = TC_JUM | TC_TX | TC_VLAN;
405 s->txpos = 0;
406 }
407
408 static inline int axienet_rx_resetting(XilinxAXIEnet *s)
409 {
410 return s->rcw[1] & RCW1_RST;
411 }
412
413 static inline int axienet_rx_enabled(XilinxAXIEnet *s)
414 {
415 return s->rcw[1] & RCW1_RX;
416 }
417
418 static inline int axienet_extmcf_enabled(XilinxAXIEnet *s)
419 {
420 return !!(s->regs[R_RAF] & RAF_EMCF_EN);
421 }
422
423 static inline int axienet_newfunc_enabled(XilinxAXIEnet *s)
424 {
425 return !!(s->regs[R_RAF] & RAF_NEWFUNC_EN);
426 }
427
428 static void xilinx_axienet_reset(DeviceState *d)
429 {
430 XilinxAXIEnet *s = XILINX_AXI_ENET(d);
431
432 axienet_rx_reset(s);
433 axienet_tx_reset(s);
434
435 s->regs[R_PPST] = PPST_LINKSTATUS | PPST_PHY_LINKSTATUS;
436 s->regs[R_IS] = IS_AUTONEG | IS_RX_DCM_LOCK | IS_MGM_RDY | IS_PHY_RST_DONE;
437
438 s->emmc = EMMC_LINKSPEED_100MB;
439 }
440
441 static void enet_update_irq(XilinxAXIEnet *s)
442 {
443 s->regs[R_IP] = s->regs[R_IS] & s->regs[R_IE];
444 qemu_set_irq(s->irq, !!s->regs[R_IP]);
445 }
446
447 static uint64_t enet_read(void *opaque, hwaddr addr, unsigned size)
448 {
449 XilinxAXIEnet *s = opaque;
450 uint32_t r = 0;
451 addr >>= 2;
452
453 switch (addr) {
454 case R_RCW0:
455 case R_RCW1:
456 r = s->rcw[addr & 1];
457 break;
458
459 case R_TC:
460 r = s->tc;
461 break;
462
463 case R_EMMC:
464 r = s->emmc;
465 break;
466
467 case R_PHYC:
468 r = s->phyc;
469 break;
470
471 case R_MCR:
472 r = s->mii.regs[addr & 3] | (1 << 7); /* Always ready. */
473 break;
474
475 case R_STATS_RX_BYTESL:
476 case R_STATS_RX_BYTESH:
477 r = s->stats.rx_bytes >> (32 * (addr & 1));
478 break;
479
480 case R_STATS_TX_BYTESL:
481 case R_STATS_TX_BYTESH:
482 r = s->stats.tx_bytes >> (32 * (addr & 1));
483 break;
484
485 case R_STATS_RXL:
486 case R_STATS_RXH:
487 r = s->stats.rx >> (32 * (addr & 1));
488 break;
489 case R_STATS_RX_BCASTL:
490 case R_STATS_RX_BCASTH:
491 r = s->stats.rx_bcast >> (32 * (addr & 1));
492 break;
493 case R_STATS_RX_MCASTL:
494 case R_STATS_RX_MCASTH:
495 r = s->stats.rx_mcast >> (32 * (addr & 1));
496 break;
497
498 case R_MC:
499 case R_MWD:
500 case R_MRD:
501 r = s->mii.regs[addr & 3];
502 break;
503
504 case R_UAW0:
505 case R_UAW1:
506 r = s->uaw[addr & 1];
507 break;
508
509 case R_UAWU:
510 case R_UAWL:
511 r = s->ext_uaw[addr & 1];
512 break;
513
514 case R_FMI:
515 r = s->fmi;
516 break;
517
518 case R_AF0:
519 case R_AF1:
520 r = s->maddr[s->fmi & 3][addr & 1];
521 break;
522
523 case 0x8000 ... 0x83ff:
524 r = s->ext_mtable[addr - 0x8000];
525 break;
526
527 default:
528 if (addr < ARRAY_SIZE(s->regs)) {
529 r = s->regs[addr];
530 }
531 DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
532 __func__, addr * 4, r));
533 break;
534 }
535 return r;
536 }
537
538 static void enet_write(void *opaque, hwaddr addr,
539 uint64_t value, unsigned size)
540 {
541 XilinxAXIEnet *s = opaque;
542 struct TEMAC *t = &s->TEMAC;
543
544 addr >>= 2;
545 switch (addr) {
546 case R_RCW0:
547 case R_RCW1:
548 s->rcw[addr & 1] = value;
549 if ((addr & 1) && value & RCW1_RST) {
550 axienet_rx_reset(s);
551 } else {
552 qemu_flush_queued_packets(qemu_get_queue(s->nic));
553 }
554 break;
555
556 case R_TC:
557 s->tc = value;
558 if (value & TC_RST) {
559 axienet_tx_reset(s);
560 }
561 break;
562
563 case R_EMMC:
564 s->emmc = value;
565 break;
566
567 case R_PHYC:
568 s->phyc = value;
569 break;
570
571 case R_MC:
572 value &= ((1 << 7) - 1);
573
574 /* Enable the MII. */
575 if (value & MC_EN) {
576 unsigned int miiclkdiv = value & ((1 << 6) - 1);
577 if (!miiclkdiv) {
578 qemu_log("AXIENET: MDIO enabled but MDIOCLK is zero!\n");
579 }
580 }
581 s->mii.mc = value;
582 break;
583
584 case R_MCR: {
585 unsigned int phyaddr = (value >> 24) & 0x1f;
586 unsigned int regaddr = (value >> 16) & 0x1f;
587 unsigned int op = (value >> 14) & 3;
588 unsigned int initiate = (value >> 11) & 1;
589
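/* MDIO transaction: op 1 writes MWD out to the selected PHY register,
   op 2 reads the PHY register back into MRD; other ops are rejected. */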
590 if (initiate) {
591 if (op == 1) {
592 mdio_write_req(&t->mdio_bus, phyaddr, regaddr, s->mii.mwd);
593 } else if (op == 2) {
594 s->mii.mrd = mdio_read_req(&t->mdio_bus, phyaddr, regaddr);
595 } else {
596 qemu_log("AXIENET: invalid MDIOBus OP=%d\n", op);
597 }
598 }
599 s->mii.mcr = value;
600 break;
601 }
602
603 case R_MWD:
604 case R_MRD:
605 s->mii.regs[addr & 3] = value;
606 break;
607
608
609 case R_UAW0:
610 case R_UAW1:
611 s->uaw[addr & 1] = value;
612 break;
613
614 case R_UAWL:
615 case R_UAWU:
616 s->ext_uaw[addr & 1] = value;
617 break;
618
619 case R_FMI:
620 s->fmi = value;
621 break;
622
623 case R_AF0:
624 case R_AF1:
625 s->maddr[s->fmi & 3][addr & 1] = value;
626 break;
627
628 case R_IS:
629 s->regs[addr] &= ~value;
630 break;
631
632 case 0x8000 ... 0x83ff:
633 s->ext_mtable[addr - 0x8000] = value;
634 break;
635
636 default:
637 DENET(qemu_log("%s addr=" TARGET_FMT_plx " v=%x\n",
638 __func__, addr * 4, (unsigned)value));
639 if (addr < ARRAY_SIZE(s->regs)) {
640 s->regs[addr] = value;
641 }
642 break;
643 }
644 enet_update_irq(s);
645 }
646
647 static const MemoryRegionOps enet_ops = {
648 .read = enet_read,
649 .write = enet_write,
650 .endianness = DEVICE_LITTLE_ENDIAN,
651 };
652
653 static int eth_can_rx(XilinxAXIEnet *s)
654 {
655 /* RX enabled? */
656 return !s->rxsize && !axienet_rx_resetting(s) && axienet_rx_enabled(s);
657 }
658
659 static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
660 {
661 int match = 1;
662
663 if (memcmp(buf, &f0, 4)) {
664 match = 0;
665 }
666
667 if (buf[4] != (f1 & 0xff) || buf[5] != ((f1 >> 8) & 0xff)) {
668 match = 0;
669 }
670
671 return match;
672 }
673
674 static void axienet_eth_rx_notify(void *opaque)
675 {
676 XilinxAXIEnet *s = XILINX_AXI_ENET(opaque);
677
678 while (s->rxappsize && stream_can_push(s->tx_control_dev,
679 axienet_eth_rx_notify, s)) {
680 size_t ret = stream_push(s->tx_control_dev,
681 (void *)s->rxapp + CONTROL_PAYLOAD_SIZE
682 - s->rxappsize, s->rxappsize, true);
683 s->rxappsize -= ret;
684 }
685
686 while (s->rxsize && stream_can_push(s->tx_data_dev,
687 axienet_eth_rx_notify, s)) {
688 size_t ret = stream_push(s->tx_data_dev, (void *)s->rxmem + s->rxpos,
689 s->rxsize, true);
690 s->rxsize -= ret;
691 s->rxpos += ret;
692 if (!s->rxsize) {
693 s->regs[R_IS] |= IS_RX_COMPLETE;
694 if (s->need_flush) {
695 s->need_flush = false;
696 qemu_flush_queued_packets(qemu_get_queue(s->nic));
697 }
698 }
699 }
700 enet_update_irq(s);
701 }
702
703 static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
704 {
705 XilinxAXIEnet *s = qemu_get_nic_opaque(nc);
706 static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
707 0xff, 0xff, 0xff};
708 static const unsigned char sa_ipmcast[3] = {0x01, 0x00, 0x52};
709 uint32_t app[CONTROL_PAYLOAD_WORDS] = {0};
710 int promisc = s->fmi & (1 << 31);
711 int unicast, broadcast, multicast, ip_multicast = 0;
712 uint32_t csum32;
713 uint16_t csum16;
714 int i;
715
716 DENET(qemu_log("%s: %zd bytes\n", __func__, size));
717
718 if (!eth_can_rx(s)) {
719 s->need_flush = true;
720 return 0;
721 }
722
723 unicast = ~buf[0] & 0x1;
724 broadcast = memcmp(buf, sa_bcast, 6) == 0;
725 multicast = !unicast && !broadcast;
726 if (multicast && (memcmp(sa_ipmcast, buf, sizeof sa_ipmcast) == 0)) {
727 ip_multicast = 1;
728 }
729
730 /* Without jumbo support, drop frames in the VLAN size range (1519-1522 bytes) unless VLAN handling is enabled. */
731 if (!(s->rcw[1] & RCW1_JUM)) {
732 if (size > 1518 && size <= 1522 && !(s->rcw[1] & RCW1_VLAN)) {
733 return size;
734 }
735 }
736
737 /* Basic address filters. To use the extended filters you generally have
738 to put the Ethernet MAC into promiscuous mode, otherwise the basic
739 filtering below drops most frames before they reach the extended path. */
740 if (!promisc) {
741 if (unicast) {
742 if (!enet_match_addr(buf, s->uaw[0], s->uaw[1])) {
743 return size;
744 }
745 } else {
746 if (broadcast) {
747 /* Broadcast. */
748 if (s->regs[R_RAF] & RAF_BCAST_REJ) {
749 return size;
750 }
751 } else {
752 int drop = 1;
753
754 /* Multicast. */
755 if (s->regs[R_RAF] & RAF_MCAST_REJ) {
756 return size;
757 }
758
759 for (i = 0; i < 4; i++) {
760 if (enet_match_addr(buf, s->maddr[i][0], s->maddr[i][1])) {
761 drop = 0;
762 break;
763 }
764 }
765
766 if (drop) {
767 return size;
768 }
769 }
770 }
771 }
772
773 /* Extended mcast filtering enabled? */
774 if (axienet_newfunc_enabled(s) && axienet_extmcf_enabled(s)) {
775 if (unicast) {
776 if (!enet_match_addr(buf, s->ext_uaw[0], s->ext_uaw[1])) {
777 return size;
778 }
779 } else {
780 if (broadcast) {
781 /* Broadcast. ??? */
782 if (s->regs[R_RAF] & RAF_BCAST_REJ) {
783 return size;
784 }
785 } else {
786 int idx, bit;
787
788 /* Multicast. */
789 if (!memcmp(buf, sa_ipmcast, 3)) {
790 return size;
791 }
792
793 idx = (buf[4] & 0x7f) << 8;
794 idx |= buf[5];
795
796 bit = 1 << (idx & 0x1f);
797 idx >>= 5;
798
799 if (!(s->ext_mtable[idx] & bit)) {
800 return size;
801 }
802 }
803 }
804 }
805
806 if (size < 12) {
807 s->regs[R_IS] |= IS_RX_REJECT;
808 enet_update_irq(s);
809 return -1;
810 }
811
812 if (size > (s->c_rxmem - 4)) {
813 size = s->c_rxmem - 4;
814 }
815
816 memcpy(s->rxmem, buf, size);
817 memset(s->rxmem + size, 0, 4); /* Clear the FCS. */
818
819 if (s->rcw[1] & RCW1_FCS) {
820 size += 4; /* fcs is inband. */
821 }
822
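/* Build the receive status words that are later pushed out over the control
   stream: app[3] carries the folded 16-bit payload checksum, app[4] the
   received length, and app[2] the match/quality flags set below. */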
823 app[0] = 5 << 28;
824 csum32 = net_checksum_add(size - 14, (uint8_t *)s->rxmem + 14);
825 /* Fold it once. */
826 csum32 = (csum32 & 0xffff) + (csum32 >> 16);
827 /* And twice to get rid of possible carries. */
828 csum16 = (csum32 & 0xffff) + (csum32 >> 16);
829 app[3] = csum16;
830 app[4] = size & 0xffff;
831
832 s->stats.rx_bytes += size;
833 s->stats.rx++;
834 if (multicast) {
835 s->stats.rx_mcast++;
836 app[2] |= 1 | (ip_multicast << 1);
837 } else if (broadcast) {
838 s->stats.rx_bcast++;
839 app[2] |= 1 << 3;
840 }
841
842 /* Good frame. */
843 app[2] |= 1 << 6;
844
845 s->rxsize = size;
846 s->rxpos = 0;
847 for (i = 0; i < ARRAY_SIZE(app); ++i) {
848 app[i] = cpu_to_le32(app[i]);
849 }
850 s->rxappsize = CONTROL_PAYLOAD_SIZE;
851 memcpy(s->rxapp, app, s->rxappsize);
852 axienet_eth_rx_notify(s);
853
854 enet_update_irq(s);
855 return size;
856 }
857
858 static size_t
859 xilinx_axienet_control_stream_push(StreamSlave *obj, uint8_t *buf, size_t len,
860 bool eop)
861 {
862 int i;
863 XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(obj);
864 XilinxAXIEnet *s = cs->enet;
865
866 assert(eop);
867 if (len != CONTROL_PAYLOAD_SIZE) {
868 hw_error("AXI Enet requires %d byte control stream payload\n",
869 (int)CONTROL_PAYLOAD_SIZE);
870 }
871
872 memcpy(s->hdr, buf, len);
873
874 for (i = 0; i < ARRAY_SIZE(s->hdr); ++i) {
875 s->hdr[i] = le32_to_cpu(s->hdr[i]);
876 }
877 return len;
878 }
879
880 static size_t
881 xilinx_axienet_data_stream_push(StreamSlave *obj, uint8_t *buf, size_t size,
882 bool eop)
883 {
884 XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(obj);
885 XilinxAXIEnet *s = ds->enet;
886
887 /* TX enable ? */
888 if (!(s->tc & TC_TX)) {
889 return size;
890 }
891
892 if (s->txpos + size > s->c_txmem) {
893 qemu_log_mask(LOG_GUEST_ERROR, "%s: Packet larger than txmem\n",
894 TYPE_XILINX_AXI_ENET);
895 s->txpos = 0;
896 return size;
897 }
898
899 if (s->txpos == 0 && eop) {
900 /* Fast path single fragment. */
901 s->txpos = size;
902 } else {
903 memcpy(s->txmem + s->txpos, buf, size);
904 buf = s->txmem;
905 s->txpos += size;
906
907 if (!eop) {
908 return size;
909 }
910 }
911
912 /* Without jumbo support, drop frames in the VLAN size range (1519-1522 bytes) unless VLAN handling is enabled. */
913 if (!(s->tc & TC_JUM)) {
914 if (s->txpos > 1518 && s->txpos <= 1522 && !(s->tc & TC_VLAN)) {
915 s->txpos = 0;
916 return size;
917 }
918 }
919
920 if (s->hdr[0] & 1) {
921 unsigned int start_off = s->hdr[1] >> 16;
922 unsigned int write_off = s->hdr[1] & 0xffff;
923 uint32_t tmp_csum;
924 uint16_t csum;
925
926 tmp_csum = net_checksum_add(s->txpos - start_off,
927 buf + start_off);
928 /* Accumulate the seed. */
929 tmp_csum += s->hdr[2] & 0xffff;
930
931 /* Fold the 32bit partial checksum. */
932 csum = net_checksum_finish(tmp_csum);
933
934 /* Writeback. */
935 buf[write_off] = csum >> 8;
936 buf[write_off + 1] = csum & 0xff;
937 }
938
939 qemu_send_packet(qemu_get_queue(s->nic), buf, s->txpos);
940
941 s->stats.tx_bytes += s->txpos;
942 s->regs[R_IS] |= IS_TX_COMPLETE;
943 enet_update_irq(s);
944
945 s->txpos = 0;
946 return size;
947 }
948
949 static NetClientInfo net_xilinx_enet_info = {
950 .type = NET_CLIENT_DRIVER_NIC,
951 .size = sizeof(NICState),
952 .receive = eth_rx,
953 };
954
955 static void xilinx_enet_realize(DeviceState *dev, Error **errp)
956 {
957 XilinxAXIEnet *s = XILINX_AXI_ENET(dev);
958 XilinxAXIEnetStreamSlave *ds = XILINX_AXI_ENET_DATA_STREAM(&s->rx_data_dev);
959 XilinxAXIEnetStreamSlave *cs = XILINX_AXI_ENET_CONTROL_STREAM(
960 &s->rx_control_dev);
961
962 object_property_add_link(OBJECT(ds), "enet", "xlnx.axi-ethernet",
963 (Object **) &ds->enet,
964 object_property_allow_set_link,
965 OBJ_PROP_LINK_STRONG);
966 object_property_add_link(OBJECT(cs), "enet", "xlnx.axi-ethernet",
967 (Object **) &cs->enet,
968 object_property_allow_set_link,
969 OBJ_PROP_LINK_STRONG);
970 object_property_set_link(OBJECT(ds), "enet", OBJECT(s), &error_abort);
971 object_property_set_link(OBJECT(cs), "enet", OBJECT(s), &error_abort);
972
973 qemu_macaddr_default_if_unset(&s->conf.macaddr);
974 s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf,
975 object_get_typename(OBJECT(dev)), dev->id, s);
976 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
977
978 tdk_init(&s->TEMAC.phy);
979 mdio_attach(&s->TEMAC.mdio_bus, &s->TEMAC.phy, s->c_phyaddr);
980
981 s->TEMAC.parent = s;
982
983 s->rxmem = g_malloc(s->c_rxmem);
984 s->txmem = g_malloc(s->c_txmem);
985 }
986
987 static void xilinx_enet_init(Object *obj)
988 {
989 XilinxAXIEnet *s = XILINX_AXI_ENET(obj);
990 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
991
992 object_initialize_child(OBJECT(s), "axistream-connected-target",
993 &s->rx_data_dev, TYPE_XILINX_AXI_ENET_DATA_STREAM);
994 object_initialize_child(OBJECT(s), "axistream-control-connected-target",
995 &s->rx_control_dev,
996 TYPE_XILINX_AXI_ENET_CONTROL_STREAM);
997 sysbus_init_irq(sbd, &s->irq);
998
999 memory_region_init_io(&s->iomem, OBJECT(s), &enet_ops, s, "enet", 0x40000);
1000 sysbus_init_mmio(sbd, &s->iomem);
1001 }
1002
1003 static Property xilinx_enet_properties[] = {
1004 DEFINE_PROP_UINT32("phyaddr", XilinxAXIEnet, c_phyaddr, 7),
1005 DEFINE_PROP_UINT32("rxmem", XilinxAXIEnet, c_rxmem, 0x1000),
1006 DEFINE_PROP_UINT32("txmem", XilinxAXIEnet, c_txmem, 0x1000),
1007 DEFINE_NIC_PROPERTIES(XilinxAXIEnet, conf),
1008 DEFINE_PROP_LINK("axistream-connected", XilinxAXIEnet,
1009 tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
1010 DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIEnet,
1011 tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
1012 DEFINE_PROP_END_OF_LIST(),
1013 };
1014
1015 static void xilinx_enet_class_init(ObjectClass *klass, void *data)
1016 {
1017 DeviceClass *dc = DEVICE_CLASS(klass);
1018
1019 dc->realize = xilinx_enet_realize;
1020 device_class_set_props(dc, xilinx_enet_properties);
1021 dc->reset = xilinx_axienet_reset;
1022 }
1023
1024 static void xilinx_enet_control_stream_class_init(ObjectClass *klass,
1025 void *data)
1026 {
1027 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
1028
1029 ssc->push = xilinx_axienet_control_stream_push;
1030 }
1031
1032 static void xilinx_enet_data_stream_class_init(ObjectClass *klass, void *data)
1033 {
1034 StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);
1035
1036 ssc->push = xilinx_axienet_data_stream_push;
1037 }
1038
1039 static const TypeInfo xilinx_enet_info = {
1040 .name = TYPE_XILINX_AXI_ENET,
1041 .parent = TYPE_SYS_BUS_DEVICE,
1042 .instance_size = sizeof(XilinxAXIEnet),
1043 .class_init = xilinx_enet_class_init,
1044 .instance_init = xilinx_enet_init,
1045 };
1046
1047 static const TypeInfo xilinx_enet_data_stream_info = {
1048 .name = TYPE_XILINX_AXI_ENET_DATA_STREAM,
1049 .parent = TYPE_OBJECT,
1050 .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
1051 .class_init = xilinx_enet_data_stream_class_init,
1052 .interfaces = (InterfaceInfo[]) {
1053 { TYPE_STREAM_SLAVE },
1054 { }
1055 }
1056 };
1057
1058 static const TypeInfo xilinx_enet_control_stream_info = {
1059 .name = TYPE_XILINX_AXI_ENET_CONTROL_STREAM,
1060 .parent = TYPE_OBJECT,
1061 .instance_size = sizeof(struct XilinxAXIEnetStreamSlave),
1062 .class_init = xilinx_enet_control_stream_class_init,
1063 .interfaces = (InterfaceInfo[]) {
1064 { TYPE_STREAM_SLAVE },
1065 { }
1066 }
1067 };
1068
1069 static void xilinx_enet_register_types(void)
1070 {
1071 type_register_static(&xilinx_enet_info);
1072 type_register_static(&xilinx_enet_data_stream_info);
1073 type_register_static(&xilinx_enet_control_stream_info);
1074 }
1075
1076 type_init(xilinx_enet_register_types)