/*
 * QEMU model of SUN GEM ethernet controller
 *
 * As found in Apple ASICs among others
 *
 * Copyright 2016 Ben Herrenschmidt
 * Copyright 2017 Mark Cave-Ayland
 */

#include "qemu/osdep.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "net/eth.h"
#include "net/checksum.h"
#include "hw/net/mii.h"
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qom/object.h"

#define TYPE_SUNGEM "sungem"

OBJECT_DECLARE_SIMPLE_TYPE(SunGEMState, SUNGEM)

#define MAX_PACKET_SIZE 9016

#define SUNGEM_MMIO_SIZE 0x200000

/* Global registers */
#define SUNGEM_MMIO_GREG_SIZE 0x2000

#define GREG_SEBSTATE 0x0000UL /* SEB State Register */

#define GREG_STAT 0x000CUL /* Status Register */
#define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
#define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
#define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
#define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
#define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
#define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
#define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
#define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
#define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
#define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
#define GREG_STAT_TXNR_SHIFT 19

/* These interrupts are edge latches in the status register,
 * reading it (or writing the corresponding bit in IACK) will
 * clear them
 */
#define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
                         GREG_STAT_TXDONE | GREG_STAT_RXDONE | \
                         GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)

#define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
#define GREG_IACK 0x0014UL /* Interrupt ACK Register */
#define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
#define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
#define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */

#define GREG_SWRST 0x1010UL /* Software Reset Register */
#define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
#define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
#define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */

/* TX DMA Registers */
#define SUNGEM_MMIO_TXDMA_SIZE 0x1000

#define TXDMA_KICK 0x0000UL /* TX Kick Register */

#define TXDMA_CFG 0x0004UL /* TX Configuration Register */
#define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
#define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */

#define TXDMA_DBLOW 0x0008UL /* TX Desc. Base Low */
#define TXDMA_DBHI 0x000CUL /* TX Desc. Base High */
#define TXDMA_PCNT 0x0024UL /* TX FIFO Packet Counter */
#define TXDMA_SMACHINE 0x0028UL /* TX State Machine Register */
#define TXDMA_DPLOW 0x0030UL /* TX Data Pointer Low */
#define TXDMA_DPHI 0x0034UL /* TX Data Pointer High */
#define TXDMA_TXDONE 0x0100UL /* TX Completion Register */
#define TXDMA_FTAG 0x0108UL /* TX FIFO Tag */
#define TXDMA_FSZ 0x0118UL /* TX FIFO Size */

/* Receive DMA Registers */
#define SUNGEM_MMIO_RXDMA_SIZE 0x2000

#define RXDMA_CFG 0x0000UL /* RX Configuration Register */
#define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
#define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
#define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
#define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */

#define RXDMA_DBLOW 0x0004UL /* RX Descriptor Base Low */
#define RXDMA_DBHI 0x0008UL /* RX Descriptor Base High */
#define RXDMA_PCNT 0x0018UL /* RX FIFO Packet Counter */
#define RXDMA_SMACHINE 0x001CUL /* RX State Machine Register */
#define RXDMA_PTHRESH 0x0020UL /* Pause Thresholds */
#define RXDMA_DPLOW 0x0024UL /* RX Data Pointer Low */
#define RXDMA_DPHI 0x0028UL /* RX Data Pointer High */
#define RXDMA_KICK 0x0100UL /* RX Kick Register */
#define RXDMA_DONE 0x0104UL /* RX Completion Register */
#define RXDMA_BLANK 0x0108UL /* RX Blanking Register */
#define RXDMA_FTAG 0x0110UL /* RX FIFO Tag */
#define RXDMA_FSZ 0x0120UL /* RX FIFO Size */

/* MAC Registers */
#define SUNGEM_MMIO_MAC_SIZE 0x200

#define MAC_TXRST 0x0000UL /* TX MAC Software Reset Command */
#define MAC_RXRST 0x0004UL /* RX MAC Software Reset Command */
#define MAC_TXSTAT 0x0010UL /* TX MAC Status Register */
#define MAC_RXSTAT 0x0014UL /* RX MAC Status Register */

#define MAC_CSTAT 0x0018UL /* MAC Control Status Register */
#define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */

#define MAC_TXMASK 0x0020UL /* TX MAC Mask Register */
#define MAC_RXMASK 0x0024UL /* RX MAC Mask Register */
#define MAC_MCMASK 0x0028UL /* MAC Control Mask Register */

#define MAC_TXCFG 0x0030UL /* TX MAC Configuration Register */
#define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */

#define MAC_RXCFG 0x0034UL /* RX MAC Configuration Register */
#define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
#define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
#define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
#define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
#define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */

#define MAC_XIFCFG 0x003CUL /* XIF Configuration Register */
#define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */

#define MAC_MINFSZ 0x0050UL /* MinFrameSize Register */
#define MAC_MAXFSZ 0x0054UL /* MaxFrameSize Register */
#define MAC_ADDR0 0x0080UL /* MAC Address 0 Register */
#define MAC_ADDR1 0x0084UL /* MAC Address 1 Register */
#define MAC_ADDR2 0x0088UL /* MAC Address 2 Register */
#define MAC_ADDR3 0x008CUL /* MAC Address 3 Register */
#define MAC_ADDR4 0x0090UL /* MAC Address 4 Register */
#define MAC_ADDR5 0x0094UL /* MAC Address 5 Register */
#define MAC_HASH0 0x00C0UL /* Hash Table 0 Register */
#define MAC_PATMPS 0x0114UL /* Peak Attempts Register */
#define MAC_SMACHINE 0x0134UL /* State Machine Register */

/* MIF Registers */
#define SUNGEM_MMIO_MIF_SIZE 0x20

#define MIF_FRAME 0x000CUL /* MIF Frame/Output Register */
#define MIF_FRAME_OP 0x30000000 /* OPcode */
#define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
#define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
#define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
#define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */

#define MIF_CFG 0x0010UL /* MIF Configuration Register */
#define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
#define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */

#define MIF_STATUS 0x0018UL /* MIF Status Register */
#define MIF_SMACHINE 0x001CUL /* MIF State Machine Register */

/* PCS/Serialink Registers */
#define SUNGEM_MMIO_PCS_SIZE 0x60
#define PCS_MIISTAT 0x0004UL /* PCS MII Status Register */
#define PCS_ISTAT 0x0018UL /* PCS Interrupt Status Reg */
#define PCS_SSTATE 0x005CUL /* Serialink State Register */

/* Descriptors */
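/*
 * Descriptors are pairs of 64-bit little-endian words living in guest
 * memory: a control/status word carrying flags, sizes and offsets, and
 * the bus address of the data buffer. They are byteswapped with
 * le64_to_cpu()/cpu_to_le64() around every DMA access below.
 */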
struct gem_txd {
    uint64_t control_word;
    uint64_t buffer;
};

#define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
#define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
#define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
#define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
#define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
#define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
#define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */

struct gem_rxd {
    uint64_t status_word;
    uint64_t buffer;
};

#define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
#define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */


struct SunGEMState {
    PCIDevice pdev;

    MemoryRegion sungem;
    MemoryRegion greg;
    MemoryRegion txdma;
    MemoryRegion rxdma;
    MemoryRegion mac;
    MemoryRegion mif;
    MemoryRegion pcs;
    NICState *nic;
    NICConf conf;
    uint32_t phy_addr;

    uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
    uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
    uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
    uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
    uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
    uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];

    /* Cache some useful things */
    uint32_t rx_mask;
    uint32_t tx_mask;

    /* Current tx packet */
    uint8_t tx_data[MAX_PACKET_SIZE];
    uint32_t tx_size;
    uint64_t tx_first_ctl;
};


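/*
 * The device drives a single INTA# line: it is asserted whenever any
 * status bit that is not masked by GREG_IMASK is set. The TXNR field
 * (the TX completion index mirrored into GREG_STAT on reads) is not an
 * interrupt source and is ignored here.
 */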
static void sungem_eval_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    mask = s->gregs[GREG_IMASK >> 2];
    stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
    if (stat & ~mask) {
        pci_set_irq(PCI_DEVICE(s), 1);
    } else {
        pci_set_irq(PCI_DEVICE(s), 0);
    }
}

static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
{
    uint32_t stat;

    stat = s->gregs[GREG_STAT >> 2];
    if (val) {
        stat |= bits;
    } else {
        stat &= ~bits;
    }
    s->gregs[GREG_STAT >> 2] = stat;
    sungem_eval_irq(s);
}

static void sungem_eval_cascade_irq(SunGEMState *s)
{
    uint32_t stat, mask;

    stat = s->macregs[MAC_TXSTAT >> 2];
    mask = s->macregs[MAC_TXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_TXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_TXMAC, false);
    }

    stat = s->macregs[MAC_RXSTAT >> 2];
    mask = s->macregs[MAC_RXMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_RXMAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_RXMAC, false);
    }

    stat = s->macregs[MAC_CSTAT >> 2] & ~MAC_CSTAT_PTR;
    mask = s->macregs[MAC_MCMASK >> 2];
    if (stat & ~mask) {
        sungem_update_status(s, GREG_STAT_MAC, true);
    } else {
        sungem_update_status(s, GREG_STAT_MAC, false);
    }
}

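/*
 * TX checksum offload: the first (SOF) descriptor of a frame carries
 * the offset at which the 16-bit one's complement checksum starts
 * (CSTART) and the offset at which the result should be stuffed into
 * the frame (COFF). The result is stored big-endian, as it appears on
 * the wire.
 */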
static void sungem_do_tx_csum(SunGEMState *s)
{
    uint16_t start, off;
    uint32_t csum;

    start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
    off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;

    trace_sungem_tx_checksum(start, off);

    if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
        trace_sungem_tx_checksum_oob();
        return;
    }

    csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
    stw_be_p(s->tx_data + off, csum);
}

static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
                               int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);

    if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
        qemu_receive_packet(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t len;

    /* If it's a start of frame, discard anything we had in the
     * buffer and start again. This should be an error condition
     * if we had something ... for now we ignore it
     */
    if (desc->control_word & TXDCTRL_SOF) {
        if (s->tx_first_ctl) {
            trace_sungem_tx_unfinished();
        }
        s->tx_size = 0;
        s->tx_first_ctl = desc->control_word;
    }

    /* Grab data size */
    len = desc->control_word & TXDCTRL_BUFSZ;

    /* Clamp it to our max size */
    if ((s->tx_size + len) > MAX_PACKET_SIZE) {
        trace_sungem_tx_overflow();
        len = MAX_PACKET_SIZE - s->tx_size;
    }

    /* Read the data */
    pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
    s->tx_size += len;

    /* If end of frame, send packet */
    if (desc->control_word & TXDCTRL_EOF) {
        trace_sungem_tx_finished(s->tx_size);

        /* Handle csum */
        if (s->tx_first_ctl & TXDCTRL_CENAB) {
            sungem_do_tx_csum(s);
        }

        /* Send it */
        sungem_send_packet(s, s->tx_data, s->tx_size);

        /* No more pending packet */
        s->tx_size = 0;
        s->tx_first_ctl = 0;
    }
}

static void sungem_tx_kick(SunGEMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t comp, kick;
    uint32_t txdma_cfg, txmac_cfg, ints;
    uint64_t dbase;

    trace_sungem_tx_kick();

    /* Check that both TX MAC and TX DMA are enabled. We don't
     * handle DMA-less direct FIFO operations (we don't emulate
     * the FIFO at all).
     *
     * A write to TXDMA_KICK while DMA isn't enabled can happen
     * when the driver is resetting the pointer.
     */
    txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
    txmac_cfg = s->macregs[MAC_TXCFG >> 2];
    if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
        !(txmac_cfg & MAC_TXCFG_ENAB)) {
        trace_sungem_tx_disabled();
        return;
    }

    /* XXX Test min frame size register ? */
    /* XXX Test max frame size register ? */

    dbase = s->txdmaregs[TXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];

    comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
    kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;

    trace_sungem_tx_process(comp, kick, s->tx_mask + 1);

    /* This is rather primitive for now, we just send everything we
     * can in one go, like e1000. Ideally we should do the sending
     * from some kind of background task
     */
    while (comp != kick) {
        struct gem_txd desc;

        /* Read the next descriptor */
        pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));

        /* Byteswap descriptor */
        desc.control_word = le64_to_cpu(desc.control_word);
        desc.buffer = le64_to_cpu(desc.buffer);
        trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);

        /* Send it for processing */
        sungem_process_tx_desc(s, &desc);

        /* Interrupt */
        ints = GREG_STAT_TXDONE;
        if (desc.control_word & TXDCTRL_INTME) {
            ints |= GREG_STAT_TXINTME;
        }
        sungem_update_status(s, ints, true);

        /* Next ! */
        comp = (comp + 1) & s->tx_mask;
        s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
    }

    /* We sent everything, set status/irq bit */
    sungem_update_status(s, GREG_STAT_TXALL, true);
}

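/*
 * The RX ring is treated as full when the completion (DONE) pointer
 * sits immediately behind the driver's KICK pointer, i.e. advancing
 * DONE by one more slot would catch up with KICK.
 */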
static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
{
    return kick == ((done + 1) & s->rx_mask);
}

static bool sungem_can_receive(NetClientState *nc)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    uint32_t kick, done, rxdma_cfg, rxmac_cfg;
    bool full;

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];

    /* If MAC disabled, can't receive */
    if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
        trace_sungem_rx_mac_disabled();
        return false;
    }
    if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
        trace_sungem_rx_txdma_disabled();
        return false;
    }

    /* Check RX availability */
    kick = s->rxdmaregs[RXDMA_KICK >> 2];
    done = s->rxdmaregs[RXDMA_DONE >> 2];
    full = sungem_rx_full(s, kick, done);

    trace_sungem_rx_check(!full, kick, done);

    return !full;
}

enum {
    rx_no_match,
    rx_match_promisc,
    rx_match_bcast,
    rx_match_allmcast,
    rx_match_mcast,
    rx_match_mac,
    rx_match_altmac,
};

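/*
 * Destination address filtering, in decreasing order of precedence:
 * promiscuous mode, broadcast, multicast (either "promiscuous group"
 * or the hash filter), the primary MAC address and finally the
 * alternate MAC address. For the hash filter, the top 8 bits of the
 * little-endian CRC of the destination address select one bit: the
 * upper nibble picks one of the 16 16-bit MAC_HASHn registers and the
 * lower nibble picks the bit within it (MSB first).
 */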
static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
{
    uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
    uint32_t mac0, mac1, mac2;

    /* Promisc enabled ? */
    if (rxcfg & MAC_RXCFG_PROM) {
        return rx_match_promisc;
    }

    /* Split the MAC address into 16-bit words, as stored in the registers */
    mac0 = (mac[4] << 8) | mac[5];
    mac1 = (mac[2] << 8) | mac[3];
    mac2 = (mac[0] << 8) | mac[1];

    trace_sungem_rx_mac_check(mac0, mac1, mac2);

    /* Is this a broadcast frame ? */
    if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
        return rx_match_bcast;
    }

    /* TODO: Implement address filter registers (or we don't care ?) */

    /* Is this a multicast frame ? */
    if (mac[0] & 1) {
        trace_sungem_rx_mac_multicast();

        /* Promisc group enabled ? */
        if (rxcfg & MAC_RXCFG_PGRP) {
            return rx_match_allmcast;
        }

        /* TODO: Check MAC control frames (or we don't care) ? */

        /* Check hash filter (somebody check that's correct ?) */
        if (rxcfg & MAC_RXCFG_HFE) {
            uint32_t hash, idx;

            crc >>= 24;
            idx = (crc >> 2) & 0x3c;
            hash = s->macregs[(MAC_HASH0 + idx) >> 2];
            if (hash & (1 << (15 - (crc & 0xf)))) {
                return rx_match_mcast;
            }
        }
        return rx_no_match;
    }

    /* Main MAC check */
    trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
                                s->macregs[MAC_ADDR1 >> 2],
                                s->macregs[MAC_ADDR2 >> 2]);

    if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
        mac1 == s->macregs[MAC_ADDR1 >> 2] &&
        mac2 == s->macregs[MAC_ADDR2 >> 2]) {
        return rx_match_mac;
    }

    /* Alt MAC check */
    if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
        mac1 == s->macregs[MAC_ADDR4 >> 2] &&
        mac2 == s->macregs[MAC_ADDR5 >> 2]) {
        return rx_match_altmac;
    }

    return rx_no_match;
}

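/*
 * RX path: filter on the destination MAC, fetch the next free RX
 * descriptor, DMA the frame into the buffer it points to, then write
 * the completed descriptor back and raise RXDONE (plus RXNOBUF if the
 * ring just became full).
 */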
static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t mac_crc, done, kick, max_fsize;
    uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
    struct gem_rxd desc;
    uint64_t dbase, baddr;
    unsigned int rx_cond;

    trace_sungem_rx_packet(size);

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
    max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;

    /* If MAC or DMA disabled, can't receive */
    if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
        !(rxmac_cfg & MAC_RXCFG_ENAB)) {
        trace_sungem_rx_disabled();
        return 0;
    }

    /* Size adjustment for FCS */
    if (rxmac_cfg & MAC_RXCFG_SFCS) {
        fcs_size = 0;
    } else {
        fcs_size = 4;
    }

    /* Discard frames smaller than a MAC address (we need at least the
     * destination address to filter on) or larger than the max frame
     * size once the FCS is accounted for
     */
    if (size < 6 || (size + 4) > max_fsize) {
        trace_sungem_rx_bad_frame_size(size);
        /* XXX Increment error statistics ? */
        return size;
    }

    /* Get MAC crc */
    mac_crc = net_crc32_le(buf, ETH_ALEN);

    /* Packet isn't for me ? */
    rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
    if (rx_cond == rx_no_match) {
        /* Just drop it */
        trace_sungem_rx_unmatched();
        return size;
    }

    /* Get ring pointers */
    kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
    done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;

    trace_sungem_rx_process(done, kick, s->rx_mask + 1);

    /* Ring full ? Can't receive */
    if (sungem_rx_full(s, kick, done)) {
        trace_sungem_rx_ringfull();
        return 0;
    }

    /* Note: The real GEM will fetch descriptors in blocks of 4,
     * for now we handle them one at a time, I think the driver will
     * cope
     */

    dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];

    /* Read the next descriptor */
    pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
                         le64_to_cpu(desc.buffer));

    /* Effective buffer address */
    baddr = le64_to_cpu(desc.buffer) & ~7ull;
    baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;

    /* Write buffer out */
    pci_dma_write(d, baddr, buf, size);

    if (fcs_size) {
        /* Should we add an FCS ? Linux doesn't ask us to strip it,
         * however I believe nothing checks it... For now we just
         * do nothing. It's faster this way.
         */
    }

    /* Calculate the checksum */
    coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
    csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);

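    /* Layout of the status word written back below:
     *   bits  0..15 : checksum of the frame starting at CSUMOFF
     *   bits 16..30 : frame length, including FCS when it isn't stripped
     *   bits 44..59 : top 16 bits of the destination address CRC
     * plus the HPASS / ALTMAC flags for hash-filter and alternate-MAC hits.
     */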
    /* Build the updated descriptor */
    desc.status_word = (size + fcs_size) << 16;
    desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
    desc.status_word |= csum;
    if (rx_cond == rx_match_mcast) {
        desc.status_word |= RXDCTRL_HPASS;
    }
    if (rx_cond == rx_match_altmac) {
        desc.status_word |= RXDCTRL_ALTMAC;
    }
    desc.status_word = cpu_to_le64(desc.status_word);

    pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    done = (done + 1) & s->rx_mask;
    s->rxdmaregs[RXDMA_DONE >> 2] = done;

    /* XXX Unconditionally set RX interrupt for now. The interrupt
     * mitigation timer might well end up adding more overhead than
     * helping here...
     */
    ints = GREG_STAT_RXDONE;
    if (sungem_rx_full(s, kick, done)) {
        ints |= GREG_STAT_RXNOBUF;
    }
    sungem_update_status(s, ints, true);

    return size;
}

static void sungem_set_link_status(NetClientState *nc)
{
    /* We don't do anything for now as I believe none of the OS
     * drivers use the MIF autopoll feature nor the PHY interrupt
     */
}

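/*
 * The RINGSZ fields encode the number of descriptors in a ring as
 * 32 << n, so the masks derived here are (32 << n) - 1. They are used
 * to wrap the KICK/DONE/TXDONE ring indices.
 */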
static void sungem_update_masks(SunGEMState *s)
{
    uint32_t sz;

    sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
    s->rx_mask = sz - 1;

    sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
    s->tx_mask = sz - 1;
}

static void sungem_reset_rx(SunGEMState *s)
{
    trace_sungem_rx_reset();

    /* XXX Do RXCFG */
    /* XXX Check value */
    s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
    s->rxdmaregs[RXDMA_DONE >> 2] = 0;
    s->rxdmaregs[RXDMA_KICK >> 2] = 0;
    s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
    s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
    s->rxdmaregs[RXDMA_BLANK >> 2] = 0;

    sungem_update_masks(s);
}

static void sungem_reset_tx(SunGEMState *s)
{
    trace_sungem_tx_reset();

    /* XXX Do TXCFG */
    /* XXX Check value */
    s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
    s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
    s->txdmaregs[TXDMA_KICK >> 2] = 0;
    s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;

    sungem_update_masks(s);

    s->tx_size = 0;
    s->tx_first_ctl = 0;
}

static void sungem_reset_all(SunGEMState *s, bool pci_reset)
{
    trace_sungem_reset(pci_reset);

    sungem_reset_rx(s);
    sungem_reset_tx(s);

    s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
    s->gregs[GREG_STAT >> 2] = 0;
    if (pci_reset) {
        uint8_t *ma = s->conf.macaddr.a;

        s->gregs[GREG_SWRST >> 2] = 0;
        s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
        s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
        s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
    } else {
        s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
    }
    s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
}

static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
                             uint8_t reg_addr, uint16_t val)
{
    trace_sungem_mii_write(phy_addr, reg_addr, val);

    /* XXX TODO */
}

static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                  uint8_t reg_addr)
{
    if (phy_addr != s->phy_addr) {
        return 0xffff;
    }
    /* Primitive emulation of a BCM5201 to please the driver,
     * ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
     */
    switch (reg_addr) {
    case MII_BMCR:
        return 0;
    case MII_PHYID1:
        return 0x0040;
    case MII_PHYID2:
        return 0x6210;
    case MII_BMSR:
        if (qemu_get_queue(s->nic)->link_down) {
            return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
        } else {
            return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
                   MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        }
    case MII_ANLPAR:
    case MII_ANAR:
        return MII_ANLPAR_TXFD;
    case 0x18: /* 5201 AUX status */
        return 3; /* 100FD */
    default:
        return 0;
    }
}

static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
                                uint8_t reg_addr)
{
    uint16_t val;

    val = __sungem_mii_read(s, phy_addr, reg_addr);

    trace_sungem_mii_read(phy_addr, reg_addr, val);

    return val;
}

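/*
 * Process a clause 22 MDIO frame written to MIF_FRAME: bits 31:30 are
 * the start of frame (must be 01), bits 29:28 the opcode (01 write,
 * 10 read), followed by the PHY and register addresses, the turnaround
 * bits and 16 bits of data. Completion is reported back to the driver
 * by setting the turnaround LSB in the value left in the register.
 */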
static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
{
    uint8_t phy_addr, reg_addr, op;

    /* Ignore frames that don't carry a valid start-of-frame */
    if ((val >> 30) != 1) {
        trace_sungem_mii_invalid_sof(val >> 30);
        return 0xffff;
    }
    phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
    reg_addr = (val & MIF_FRAME_REGAD) >> 18;
    op = (val & MIF_FRAME_OP) >> 28;
    switch (op) {
    case 1:
        sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
        return val | MIF_FRAME_TALSB;
    case 2:
        return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
    default:
        trace_sungem_mii_invalid_op(op);
    }
    return 0xffff | MIF_FRAME_TALSB;
}

static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
                                   unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_greg_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case GREG_SEBSTATE:
    case GREG_STAT:
    case GREG_STAT2:
    case GREG_PCIESTAT:
        return; /* No actual write */
    case GREG_IACK:
        val &= GREG_STAT_LATCH;
        s->gregs[GREG_STAT >> 2] &= ~val;
        sungem_eval_irq(s);
        return; /* No actual write */
    case GREG_PCIEMASK:
        val &= 0x7;
        break;
    }

    s->gregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case GREG_IMASK:
        /* Re-evaluate interrupt */
        sungem_eval_irq(s);
        break;
    case GREG_SWRST:
        switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
        case GREG_SWRST_RXRST:
            sungem_reset_rx(s);
            break;
        case GREG_SWRST_TXRST:
            sungem_reset_tx(s);
            break;
        case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
            sungem_reset_all(s, false);
        }
        break;
    }
}

static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->gregs[addr >> 2];

    trace_sungem_mmio_greg_read(addr, val);

    switch (addr) {
    case GREG_STAT:
        /* Side effect, clear the latched interrupt bits */
        s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
        sungem_eval_irq(s);

        /* Inject TX completion in returned value */
        val = (val & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    case GREG_STAT2:
        /* Return the status reg without side effect
         * (and inject TX completion in returned value)
         */
        val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
              (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_greg_ops = {
    .read = sungem_mmio_greg_read,
    .write = sungem_mmio_greg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_txdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case TXDMA_TXDONE:
    case TXDMA_PCNT:
    case TXDMA_SMACHINE:
    case TXDMA_DPLOW:
    case TXDMA_DPHI:
    case TXDMA_FSZ:
    case TXDMA_FTAG:
        return; /* No actual write */
    }

    s->txdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case TXDMA_KICK:
        sungem_tx_kick(s);
        break;
    case TXDMA_CFG:
        sungem_update_masks(s);
        break;
    }
}

static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->txdmaregs[addr >> 2];

    trace_sungem_mmio_txdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_txdma_ops = {
    .read = sungem_mmio_txdma_read,
    .write = sungem_mmio_txdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_rxdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case RXDMA_DONE:
    case RXDMA_PCNT:
    case RXDMA_SMACHINE:
    case RXDMA_DPLOW:
    case RXDMA_DPHI:
    case RXDMA_FSZ:
    case RXDMA_FTAG:
        return; /* No actual write */
    }

    s->rxdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case RXDMA_KICK:
        trace_sungem_rx_kick(val);
        break;
    case RXDMA_CFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->rxdmaregs[addr >> 2];

    trace_sungem_mmio_rxdma_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_rxdma_ops = {
    .read = sungem_mmio_rxdma_read,
    .write = sungem_mmio_rxdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mac_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MAC_TXRST: /* Not technically read-only but will do for now */
    case MAC_RXRST: /* Not technically read-only but will do for now */
    case MAC_TXSTAT:
    case MAC_RXSTAT:
    case MAC_CSTAT:
    case MAC_PATMPS:
    case MAC_SMACHINE:
        return; /* No actual write */
    case MAC_MINFSZ:
        /* 10-bits implemented */
        val &= 0x3ff;
        break;
    }

    s->macregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MAC_TXMASK:
    case MAC_RXMASK:
    case MAC_MCMASK:
        sungem_eval_cascade_irq(s);
        break;
    case MAC_RXCFG:
        sungem_update_masks(s);
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}

static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->macregs[addr >> 2];

    trace_sungem_mmio_mac_read(addr, val);

    switch (addr) {
    case MAC_TXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_TXMAC, false);
        break;
    case MAC_RXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_RXMAC, false);
        break;
    case MAC_CSTAT:
        /* Side effect, interrupt bits */
        s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
        sungem_update_status(s, GREG_STAT_MAC, false);
        break;
    }

    return val;
}

static const MemoryRegionOps sungem_mmio_mac_ops = {
    .read = sungem_mmio_mac_read,
    .write = sungem_mmio_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mif_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MIF_STATUS:
    case MIF_SMACHINE:
        return; /* No actual write */
    case MIF_CFG:
        /* Maintain the RO MDI bits to advertise an MDIO PHY on MDI0 */
        val &= ~MIF_CFG_MDI1;
        val |= MIF_CFG_MDI0;
        break;
    }

    s->mifregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MIF_FRAME:
        s->mifregs[addr >> 2] = sungem_mii_op(s, val);
        break;
    }
}

static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->mifregs[addr >> 2];

    trace_sungem_mmio_mif_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_mif_ops = {
    .read = sungem_mmio_mif_read,
    .write = sungem_mmio_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_pcs_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case PCS_MIISTAT:
    case PCS_ISTAT:
    case PCS_SSTATE:
        return; /* No actual write */
    }

    s->pcsregs[addr >> 2] = val;
}

static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->pcsregs[addr >> 2];

    trace_sungem_mmio_pcs_read(addr, val);

    return val;
}

static const MemoryRegionOps sungem_mmio_pcs_ops = {
    .read = sungem_mmio_pcs_read,
    .write = sungem_mmio_pcs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void sungem_uninit(PCIDevice *dev)
{
    SunGEMState *s = SUNGEM(dev);

    qemu_del_nic(s->nic);
}

static NetClientInfo net_sungem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sungem_can_receive,
    .receive = sungem_receive,
    .link_status_changed = sungem_set_link_status,
};

static void sungem_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    SunGEMState *s = SUNGEM(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM |
                 PCI_STATUS_66MHZ);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_conf[PCI_MIN_GNT] = 0x40;
    pci_conf[PCI_MAX_LAT] = 0x40;

    sungem_reset_all(s, true);
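    /*
     * Carve the MMIO BAR into the register blocks the model implements:
     * global registers at 0x0, TX DMA at 0x2000, RX DMA at 0x4000, the
     * MAC at 0x6000, the MIF at 0x6200 and the PCS/Serialink block at
     * 0x9000, matching the layout the sungem drivers expect.
     */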
    memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);

    memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
                          "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
    memory_region_add_subregion(&s->sungem, 0, &s->greg);

    memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
                          "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);

    memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
                          "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);

    memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
                          "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);

    memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
                          "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);

    memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
                          "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
    memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic),
                             s->conf.macaddr.a);
}

static void sungem_reset(DeviceState *dev)
{
    SunGEMState *s = SUNGEM(dev);

    sungem_reset_all(s, true);
}

static void sungem_instance_init(Object *obj)
{
    SunGEMState *s = SUNGEM(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}

static Property sungem_properties[] = {
    DEFINE_NIC_PROPERTIES(SunGEMState, conf),
    /* Phy address should be 0 for most Apple machines except
     * for K2 in which case it's 1. Will be set by a machine
     * override.
     */
    DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_sungem = {
    .name = "sungem",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, SunGEMState),
        VMSTATE_MACADDR(conf.macaddr, SunGEMState),
        VMSTATE_UINT32(phy_addr, SunGEMState),
        VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
                             (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
                             (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
        VMSTATE_UINT32(rx_mask, SunGEMState),
        VMSTATE_UINT32(tx_mask, SunGEMState),
        VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
        VMSTATE_UINT32(tx_size, SunGEMState),
        VMSTATE_UINT64(tx_first_ctl, SunGEMState),
        VMSTATE_END_OF_LIST()
    }
};

static void sungem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sungem_realize;
    k->exit = sungem_uninit;
    k->vendor_id = PCI_VENDOR_ID_APPLE;
    k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
    k->revision = 0x01;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_sungem;
    dc->reset = sungem_reset;
    device_class_set_props(dc, sungem_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}

static const TypeInfo sungem_info = {
    .name = TYPE_SUNGEM,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(SunGEMState),
    .class_init = sungem_class_init,
    .instance_init = sungem_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};

static void sungem_register_types(void)
{
    type_register_static(&sungem_info);
}

type_init(sungem_register_types)