/*
 * QEMU Sun Happy Meal Ethernet emulation
 *
 * Copyright (c) 2017 Mark Cave-Ayland
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/net/mii.h"
#include "net/net.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"
#include "sysemu/sysemu.h"
#include "trace.h"

#define HME_REG_SIZE 0x8000

#define HME_SEB_REG_SIZE 0x2000

#define HME_SEBI_RESET 0x0
#define HME_SEB_RESET_ETX 0x1
#define HME_SEB_RESET_ERX 0x2

#define HME_SEBI_STAT 0x100
#define HME_SEBI_STAT_LINUXBUG 0x108
#define HME_SEB_STAT_RXTOHOST 0x10000
#define HME_SEB_STAT_MIFIRQ 0x800000
#define HME_SEB_STAT_HOSTTOTX 0x1000000
#define HME_SEB_STAT_TXALL 0x2000000

#define HME_SEBI_IMASK 0x104
#define HME_SEBI_IMASK_LINUXBUG 0x10c

#define HME_ETX_REG_SIZE 0x2000

#define HME_ETXI_PENDING 0x0

#define HME_ETXI_RING 0x8
#define HME_ETXI_RING_ADDR 0xffffff00
#define HME_ETXI_RING_OFFSET 0xff

#define HME_ETXI_RSIZE 0x2c

#define HME_ERX_REG_SIZE 0x2000

#define HME_ERXI_CFG 0x0
#define HME_ERX_CFG_RINGSIZE 0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT 9
#define HME_ERX_CFG_BYTEOFFSET 0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
#define HME_ERX_CFG_CSUMSTART 0x7f0000
#define HME_ERX_CFG_CSUMSHIFT 16

#define HME_ERXI_RING 0x4
#define HME_ERXI_RING_ADDR 0xffffff00
#define HME_ERXI_RING_OFFSET 0xff

#define HME_MAC_REG_SIZE 0x1000

#define HME_MACI_TXCFG 0x20c
#define HME_MAC_TXCFG_ENABLE 0x1

#define HME_MACI_RXCFG 0x30c
#define HME_MAC_RXCFG_ENABLE 0x1
#define HME_MAC_RXCFG_PMISC 0x40
#define HME_MAC_RXCFG_HENABLE 0x800

#define HME_MACI_MACADDR2 0x318
#define HME_MACI_MACADDR1 0x31c
#define HME_MACI_MACADDR0 0x320

#define HME_MACI_HASHTAB3 0x340
#define HME_MACI_HASHTAB2 0x344
#define HME_MACI_HASHTAB1 0x348
#define HME_MACI_HASHTAB0 0x34c

#define HME_MIF_REG_SIZE 0x20

#define HME_MIFI_FO 0xc
#define HME_MIF_FO_ST 0xc0000000
#define HME_MIF_FO_ST_SHIFT 30
#define HME_MIF_FO_OPC 0x30000000
#define HME_MIF_FO_OPC_SHIFT 28
#define HME_MIF_FO_PHYAD 0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT 23
#define HME_MIF_FO_REGAD 0x007c0000
#define HME_MIF_FO_REGAD_SHIFT 18
#define HME_MIF_FO_TAMSB 0x20000
#define HME_MIF_FO_TALSB 0x10000
#define HME_MIF_FO_DATA 0xffff

#define HME_MIFI_CFG 0x10
#define HME_MIF_CFG_MDI0 0x100
#define HME_MIF_CFG_MDI1 0x200

#define HME_MIFI_IMASK 0x14

#define HME_MIFI_STAT 0x18


/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL 1
#define HME_PHYAD_EXTERNAL 0

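/*
 * MII management frame fields as defined by IEEE 802.3 clause 22:
 * the start-of-frame pattern is 01, the read opcode is 10 and the
 * write opcode is 01.
 */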
#define MII_COMMAND_START 0x1
#define MII_COMMAND_READ 0x2
#define MII_COMMAND_WRITE 0x1

#define TYPE_SUNHME "sunhme"
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE 0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE 0x8

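/*
 * Each TX/RX descriptor is a pair of 32-bit words in guest memory: the
 * first word holds the ownership/status flags below, the second holds the
 * DMA address of the data buffer.
 */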
#define HME_XD_OWN 0x80000000
#define HME_XD_OFL 0x40000000
#define HME_XD_SOP 0x40000000
#define HME_XD_EOP 0x20000000
#define HME_XD_RXLENMSK 0x3fff0000
#define HME_XD_RXLENSHIFT 16
#define HME_XD_RXCKSUM 0xffff
#define HME_XD_TXLENMSK 0x00001fff
#define HME_XD_TXCKSUM 0x10000000
#define HME_XD_TXCSSTUFF 0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART 0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE 0x20

typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;
    NICConf conf;

    MemoryRegion hme;
    MemoryRegion sebreg;
    MemoryRegion etxreg;
    MemoryRegion erxreg;
    MemoryRegion macreg;
    MemoryRegion mifreg;

    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;

static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void sunhme_reset_tx(SunHMEState *s)
{
    /* Indicate TX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
}

static void sunhme_reset_rx(SunHMEState *s)
{
    /* Indicate RX reset complete */
    s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
}

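/*
 * Recompute the PCI interrupt level: pending MIF status bits (gated by the
 * MIF interrupt mask) are folded into the SEB status as MIFIRQ, and any SEB
 * status bit left unmasked by the SEB interrupt mask asserts the line.
 */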
static void sunhme_update_irq(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int level;

    /* MIF interrupt mask (16-bit) */
    uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
    uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;

    /* Main SEB interrupt mask (include MIF status from above) */
    uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
                       ~HME_SEB_STAT_MIFIRQ;
    uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
    if (mif) {
        seb |= HME_SEB_STAT_MIFIRQ;
    }

    level = (seb ? 1 : 0);
    trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);

    pci_set_irq(d, level);
}

static void sunhme_seb_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_seb_write(addr, val);

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    switch (addr) {
    case HME_SEBI_RESET:
        if (val & HME_SEB_RESET_ETX) {
            sunhme_reset_tx(s);
        }
        if (val & HME_SEB_RESET_ERX) {
            sunhme_reset_rx(s);
        }
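        /* sunhme_reset_tx/rx above clear their bits in the register, so
           store the updated register contents rather than the guest value */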
        val = s->sebregs[HME_SEBI_RESET >> 2];
        break;
    }

    s->sebregs[addr >> 2] = val;
}

static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    /* Handle buggy Linux drivers before 4.13 which have
       the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
    switch (addr) {
    case HME_SEBI_STAT_LINUXBUG:
        addr = HME_SEBI_STAT;
        break;
    case HME_SEBI_IMASK_LINUXBUG:
        addr = HME_SEBI_IMASK;
        break;
    default:
        break;
    }

    val = s->sebregs[addr >> 2];

    switch (addr) {
    case HME_SEBI_STAT:
        /* Autoclear status (except MIF) */
        s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_seb_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit(SunHMEState *s);

static void sunhme_etx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_etx_write(addr, val);

    switch (addr) {
    case HME_ETXI_PENDING:
        if (val) {
            sunhme_transmit(s);
        }
        break;
    }

    s->etxregs[addr >> 2] = val;
}

static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->etxregs[addr >> 2];

    trace_sunhme_etx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_erx_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);

    trace_sunhme_erx_write(addr, val);

    s->erxregs[addr >> 2] = val;
}

static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->erxregs[addr >> 2];

    trace_sunhme_erx_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mac_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t oldval = s->macregs[addr >> 2];

    trace_sunhme_mac_write(addr, val);

    s->macregs[addr >> 2] = val;

    switch (addr) {
    case HME_MACI_RXCFG:
        if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
             (val & HME_MAC_RXCFG_ENABLE)) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->macregs[addr >> 2];

    trace_sunhme_mac_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
{
    trace_sunhme_mii_write(reg, data);

    switch (reg) {
    case MII_BMCR:
        if (data & MII_BMCR_RESET) {
            /* Autoclear reset bit, enable auto negotiation */
            data &= ~MII_BMCR_RESET;
            data |= MII_BMCR_AUTOEN;
        }
        if (data & MII_BMCR_ANRESTART) {
            /* Autoclear auto negotiation restart */
            data &= ~MII_BMCR_ANRESTART;

            /* Indicate negotiation complete */
            s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;

            if (!qemu_get_queue(s->nic)->link_down) {
                s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
                s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
            }
        }
        break;
    }

    s->miiregs[reg] = data;
}

static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
{
    uint16_t data = s->miiregs[reg];

    trace_sunhme_mii_read(reg, data);

    return data;
}

static void sunhme_mif_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint8_t cmd, reg;
    uint16_t data;

    trace_sunhme_mif_write(addr, val);

    switch (addr) {
    case HME_MIFI_CFG:
        /* Mask the read-only bits */
        val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        val |= s->mifregs[HME_MIFI_CFG >> 2] &
               (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
        break;
    case HME_MIFI_FO:
        /* Detect start of MII command */
        if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
                != MII_COMMAND_START) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        /* Internal phy only */
        if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
                != HME_PHYAD_INTERNAL) {
            val |= HME_MIF_FO_TALSB;
            break;
        }

        cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
        reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
        data = (val & HME_MIF_FO_DATA);

        switch (cmd) {
        case MII_COMMAND_WRITE:
            sunhme_mii_write(s, reg, data);
            break;

        case MII_COMMAND_READ:
            val &= ~HME_MIF_FO_DATA;
            val |= sunhme_mii_read(s, reg);
            break;
        }

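        /* Set the turnaround LSB so that the driver's poll of the frame
           register sees the operation as complete */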
        val |= HME_MIF_FO_TALSB;
        break;
    }

    s->mifregs[addr >> 2] = val;
}

static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
                                unsigned size)
{
    SunHMEState *s = SUNHME(opaque);
    uint64_t val;

    val = s->mifregs[addr >> 2];

    switch (addr) {
    case HME_MIFI_STAT:
        /* Autoclear MIF interrupt status */
        s->mifregs[HME_MIFI_STAT >> 2] = 0;
        sunhme_update_irq(s);
        break;
    }

    trace_sunhme_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}

static inline int sunhme_get_tx_ring_count(SunHMEState *s)
{
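    /* The RSIZE register holds the TX ring size in units of 16 descriptors,
       minus one */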
    return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
}

static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
{
    return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
}

static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
    ring |= i & HME_ETXI_RING_OFFSET;

    s->etxregs[HME_ETXI_RING >> 2] = ring;
}

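/*
 * Walk the TX descriptor ring while the guest still owns descriptors:
 * gather each fragment into xmit_buffer, optionally accumulate and stuff
 * the TX checksum, send the frame on EOP, then hand each descriptor back
 * by clearing its OWN bit and raise the HOSTTOTX/TXALL status bits.
 */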
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}

static int sunhme_can_receive(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    return s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE;
}

static void sunhme_link_status_changed(NetClientState *nc)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);

    if (nc->link_down) {
        s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
    } else {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Exact bits unknown */
    s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
    sunhme_update_irq(s);
}

static inline int sunhme_get_rx_ring_count(SunHMEState *s)
{
    uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
                     >> HME_ERX_CFG_RINGSIZE_SHIFT;

    switch (rings) {
    case 0:
        return 32;
    case 1:
        return 64;
    case 2:
        return 128;
    case 3:
        return 256;
    }

    return 0;
}

static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
{
    return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
}

static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
{
    uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
    ring |= i & HME_ERXI_RING_OFFSET;

    s->erxregs[HME_ERXI_RING >> 2] = ring;
}

#define MIN_BUF_SIZE 60

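/*
 * Accept a frame if it matches the local MAC address, the broadcast
 * address, the multicast hash filter or if promiscuous mode is enabled;
 * pad short frames to the 60-byte Ethernet minimum, DMA the data into the
 * current RX descriptor's buffer, record the length and checksum in the
 * descriptor status word and raise the RXTOHOST status bit.
 */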
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
            (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
            (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
            (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
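            /* The top 6 bits of the little-endian CRC32 of the destination
               MAC select one of the 64 bits spread across the four 16-bit
               HASHTAB registers */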
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                  (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If the frame is too small, pad it out to the minimum length */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;

    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum */
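    /* CSUMSTART is expressed in 16-bit words, so double it to get the byte
       offset at which checksumming starts */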
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}

static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};

static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

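    /* Sub-blocks within the register BAR: SEB at 0x0, ETX at 0x2000,
       ERX at 0x4000, MAC at 0x6000 and MIF at 0x7000 */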
    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}

static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}

static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    dc->props = sunhme_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sunhme_info = {
    .name = TYPE_SUNHME,
    .parent = TYPE_PCI_DEVICE,
    .class_init = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)