4 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
5 * Copyright (c) 2008 Qumranet
6 * Based on work done by:
7 * Copyright (c) 2007 Dan Aloni
8 * Copyright (c) 2004 Antony T Curtis
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
/* Debug message categories; each gets one bit in debugflags. */
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
/* Categories enabled by default: TX errors and general messages. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
#ifdef DEBUG_E1000
/* Emit a debug message on stderr when category `what' is enabled. */
#define DBGOUT(what, fmt, params...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ##params); \
    } while (0)
#else
#define DBGOUT(what, fmt, params...) do {} while (0)
#endif
52 #define IOPORT_SIZE 0x40
53 #define PNPMMIO_SIZE 0x20000
57 * E1000_DEV_ID_82540EM works with Windows and Linux
58 * E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
59 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
60 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
63 enum { E1000_DEVID
= E1000_DEV_ID_82540EM
};
66 * May need to specify additional MAC-to-PHY entries --
67 * Intel's Windows driver refuses to initialize unless they match
70 PHY_ID2_INIT
= E1000_DEVID
== E1000_DEV_ID_82573L
? 0xcc2 :
71 E1000_DEVID
== E1000_DEV_ID_82544GC_COPPER
? 0xc30 :
72 /* default to E1000_DEV_ID_82540EM */ 0xc20
75 typedef struct E1000State_st
{
80 uint32_t mac_reg
[0x8000];
81 uint16_t phy_reg
[0x20];
82 uint16_t eeprom_data
[64];
85 uint32_t rxbuf_min_shift
;
88 unsigned char header
[256];
89 unsigned char vlan_header
[4];
90 unsigned char vlan
[4];
91 unsigned char data
[0x10000];
93 unsigned char sum_needed
;
94 unsigned char vlan_needed
;
108 char cptse
; // current packet tse bit
112 uint32_t val_in
; // shifted in from guest driver
120 #define defreg(x) x = (E1000_##x>>2)
122 defreg(CTRL
), defreg(EECD
), defreg(EERD
), defreg(GPRC
),
123 defreg(GPTC
), defreg(ICR
), defreg(ICS
), defreg(IMC
),
124 defreg(IMS
), defreg(LEDCTL
), defreg(MANC
), defreg(MDIC
),
125 defreg(MPC
), defreg(PBA
), defreg(RCTL
), defreg(RDBAH
),
126 defreg(RDBAL
), defreg(RDH
), defreg(RDLEN
), defreg(RDT
),
127 defreg(STATUS
), defreg(SWSM
), defreg(TCTL
), defreg(TDBAH
),
128 defreg(TDBAL
), defreg(TDH
), defreg(TDLEN
), defreg(TDT
),
129 defreg(TORH
), defreg(TORL
), defreg(TOTH
), defreg(TOTL
),
130 defreg(TPR
), defreg(TPT
), defreg(TXDCTL
), defreg(WUFC
),
131 defreg(RA
), defreg(MTA
), defreg(CRCERRS
),defreg(VFTA
),
135 enum { PHY_R
= 1, PHY_W
= 2, PHY_RW
= PHY_R
| PHY_W
};
136 static const char phy_regcap
[0x20] = {
137 [PHY_STATUS
] = PHY_R
, [M88E1000_EXT_PHY_SPEC_CTRL
] = PHY_RW
,
138 [PHY_ID1
] = PHY_R
, [M88E1000_PHY_SPEC_CTRL
] = PHY_RW
,
139 [PHY_CTRL
] = PHY_RW
, [PHY_1000T_CTRL
] = PHY_RW
,
140 [PHY_LP_ABILITY
] = PHY_R
, [PHY_1000T_STATUS
] = PHY_R
,
141 [PHY_AUTONEG_ADV
] = PHY_RW
, [M88E1000_RX_ERR_CNTR
] = PHY_R
,
142 [PHY_ID2
] = PHY_R
, [M88E1000_PHY_SPEC_STATUS
] = PHY_R
146 ioport_map(PCIDevice
*pci_dev
, int region_num
, uint32_t addr
,
147 uint32_t size
, int type
)
149 DBGOUT(IO
, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr
, size
);
153 set_interrupt_cause(E1000State
*s
, int index
, uint32_t val
)
156 val
|= E1000_ICR_INT_ASSERTED
;
157 s
->mac_reg
[ICR
] = val
;
158 qemu_set_irq(s
->dev
.irq
[0], (s
->mac_reg
[IMS
] & s
->mac_reg
[ICR
]) != 0);
162 set_ics(E1000State
*s
, int index
, uint32_t val
)
164 DBGOUT(INTERRUPT
, "set_ics %x, ICR %x, IMR %x\n", val
, s
->mac_reg
[ICR
],
166 set_interrupt_cause(s
, 0, val
| s
->mac_reg
[ICR
]);
170 rxbufsize(uint32_t v
)
172 v
&= E1000_RCTL_BSEX
| E1000_RCTL_SZ_16384
| E1000_RCTL_SZ_8192
|
173 E1000_RCTL_SZ_4096
| E1000_RCTL_SZ_2048
| E1000_RCTL_SZ_1024
|
174 E1000_RCTL_SZ_512
| E1000_RCTL_SZ_256
;
176 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_16384
:
178 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_8192
:
180 case E1000_RCTL_BSEX
| E1000_RCTL_SZ_4096
:
182 case E1000_RCTL_SZ_1024
:
184 case E1000_RCTL_SZ_512
:
186 case E1000_RCTL_SZ_256
:
193 set_ctrl(E1000State
*s
, int index
, uint32_t val
)
195 /* RST is self clearing */
196 s
->mac_reg
[CTRL
] = val
& ~E1000_CTRL_RST
;
200 set_rx_control(E1000State
*s
, int index
, uint32_t val
)
202 s
->mac_reg
[RCTL
] = val
;
203 s
->rxbuf_size
= rxbufsize(val
);
204 s
->rxbuf_min_shift
= ((val
/ E1000_RCTL_RDMTS_QUAT
) & 3) + 1;
205 DBGOUT(RX
, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s
->mac_reg
[RDT
],
210 set_mdic(E1000State
*s
, int index
, uint32_t val
)
212 uint32_t data
= val
& E1000_MDIC_DATA_MASK
;
213 uint32_t addr
= ((val
& E1000_MDIC_REG_MASK
) >> E1000_MDIC_REG_SHIFT
);
215 if ((val
& E1000_MDIC_PHY_MASK
) >> E1000_MDIC_PHY_SHIFT
!= 1) // phy #
216 val
= s
->mac_reg
[MDIC
] | E1000_MDIC_ERROR
;
217 else if (val
& E1000_MDIC_OP_READ
) {
218 DBGOUT(MDIC
, "MDIC read reg 0x%x\n", addr
);
219 if (!(phy_regcap
[addr
] & PHY_R
)) {
220 DBGOUT(MDIC
, "MDIC read reg %x unhandled\n", addr
);
221 val
|= E1000_MDIC_ERROR
;
223 val
= (val
^ data
) | s
->phy_reg
[addr
];
224 } else if (val
& E1000_MDIC_OP_WRITE
) {
225 DBGOUT(MDIC
, "MDIC write reg 0x%x, value 0x%x\n", addr
, data
);
226 if (!(phy_regcap
[addr
] & PHY_W
)) {
227 DBGOUT(MDIC
, "MDIC write reg %x unhandled\n", addr
);
228 val
|= E1000_MDIC_ERROR
;
230 s
->phy_reg
[addr
] = data
;
232 s
->mac_reg
[MDIC
] = val
| E1000_MDIC_READY
;
233 set_ics(s
, 0, E1000_ICR_MDAC
);
237 get_eecd(E1000State
*s
, int index
)
239 uint32_t ret
= E1000_EECD_PRES
|E1000_EECD_GNT
| s
->eecd_state
.old_eecd
;
241 DBGOUT(EEPROM
, "reading eeprom bit %d (reading %d)\n",
242 s
->eecd_state
.bitnum_out
, s
->eecd_state
.reading
);
243 if (!s
->eecd_state
.reading
||
244 ((s
->eeprom_data
[(s
->eecd_state
.bitnum_out
>> 4) & 0x3f] >>
245 ((s
->eecd_state
.bitnum_out
& 0xf) ^ 0xf))) & 1)
246 ret
|= E1000_EECD_DO
;
251 set_eecd(E1000State
*s
, int index
, uint32_t val
)
253 uint32_t oldval
= s
->eecd_state
.old_eecd
;
255 s
->eecd_state
.old_eecd
= val
& (E1000_EECD_SK
| E1000_EECD_CS
|
256 E1000_EECD_DI
|E1000_EECD_FWE_MASK
|E1000_EECD_REQ
);
257 if (!(E1000_EECD_SK
& (val
^ oldval
))) // no clock edge
259 if (!(E1000_EECD_SK
& val
)) { // falling edge
260 s
->eecd_state
.bitnum_out
++;
263 if (!(val
& E1000_EECD_CS
)) { // rising, no CS (EEPROM reset)
264 memset(&s
->eecd_state
, 0, sizeof s
->eecd_state
);
266 * restore old_eecd's E1000_EECD_SK (known to be on)
267 * to avoid false detection of a clock edge
269 s
->eecd_state
.old_eecd
= E1000_EECD_SK
;
272 s
->eecd_state
.val_in
<<= 1;
273 if (val
& E1000_EECD_DI
)
274 s
->eecd_state
.val_in
|= 1;
275 if (++s
->eecd_state
.bitnum_in
== 9 && !s
->eecd_state
.reading
) {
276 s
->eecd_state
.bitnum_out
= ((s
->eecd_state
.val_in
& 0x3f)<<4)-1;
277 s
->eecd_state
.reading
= (((s
->eecd_state
.val_in
>> 6) & 7) ==
278 EEPROM_READ_OPCODE_MICROWIRE
);
280 DBGOUT(EEPROM
, "eeprom bitnum in %d out %d, reading %d\n",
281 s
->eecd_state
.bitnum_in
, s
->eecd_state
.bitnum_out
,
282 s
->eecd_state
.reading
);
286 flash_eerd_read(E1000State
*s
, int x
)
288 unsigned int index
, r
= s
->mac_reg
[EERD
] & ~E1000_EEPROM_RW_REG_START
;
290 if ((index
= r
>> E1000_EEPROM_RW_ADDR_SHIFT
) > EEPROM_CHECKSUM_REG
)
292 return (s
->eeprom_data
[index
] << E1000_EEPROM_RW_REG_DATA
) |
293 E1000_EEPROM_RW_REG_DONE
| r
;
/*
 * Compute an internet checksum over data[css..n) (bounded by cse when
 * non-zero) and store it big-endian at data+sloc.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}
311 vlan_enabled(E1000State
*s
)
313 return ((s
->mac_reg
[CTRL
] & E1000_CTRL_VME
) != 0);
317 vlan_rx_filter_enabled(E1000State
*s
)
319 return ((s
->mac_reg
[RCTL
] & E1000_RCTL_VFE
) != 0);
323 is_vlan_packet(E1000State
*s
, const uint8_t *buf
)
325 return (be16_to_cpup((uint16_t *)(buf
+ 12)) ==
326 le16_to_cpup((uint16_t *)(s
->mac_reg
+ VET
)));
330 is_vlan_txd(uint32_t txd_lower
)
332 return ((txd_lower
& E1000_TXD_CMD_VLE
) != 0);
336 xmit_seg(E1000State
*s
)
339 unsigned int frames
= s
->tx
.tso_frames
, css
, sofar
, n
;
340 struct e1000_tx
*tp
= &s
->tx
;
342 if (tp
->tse
&& tp
->cptse
) {
344 DBGOUT(TXSUM
, "frames %d size %d ipcss %d\n",
345 frames
, tp
->size
, css
);
346 if (tp
->ip
) { // IPv4
347 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+2),
349 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4),
350 be16_to_cpup((uint16_t *)(tp
->data
+css
+4))+frames
);
352 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4),
355 len
= tp
->size
- css
;
356 DBGOUT(TXSUM
, "tcp %d tucss %d len %d\n", tp
->tcp
, css
, len
);
358 sofar
= frames
* tp
->mss
;
359 cpu_to_be32wu((uint32_t *)(tp
->data
+css
+4), // seq
360 be32_to_cpupu((uint32_t *)(tp
->data
+css
+4))+sofar
);
361 if (tp
->paylen
- sofar
> tp
->mss
)
362 tp
->data
[css
+ 13] &= ~9; // PSH, FIN
364 cpu_to_be16wu((uint16_t *)(tp
->data
+css
+4), len
);
365 if (tp
->sum_needed
& E1000_TXD_POPTS_TXSM
) {
366 // add pseudo-header length before checksum calculation
367 sp
= (uint16_t *)(tp
->data
+ tp
->tucso
);
368 cpu_to_be16wu(sp
, be16_to_cpup(sp
) + len
);
373 if (tp
->sum_needed
& E1000_TXD_POPTS_TXSM
)
374 putsum(tp
->data
, tp
->size
, tp
->tucso
, tp
->tucss
, tp
->tucse
);
375 if (tp
->sum_needed
& E1000_TXD_POPTS_IXSM
)
376 putsum(tp
->data
, tp
->size
, tp
->ipcso
, tp
->ipcss
, tp
->ipcse
);
377 if (tp
->vlan_needed
) {
378 memmove(tp
->vlan
, tp
->data
, 12);
379 memcpy(tp
->data
+ 8, tp
->vlan_header
, 4);
380 qemu_send_packet(s
->vc
, tp
->vlan
, tp
->size
+ 4);
382 qemu_send_packet(s
->vc
, tp
->data
, tp
->size
);
385 n
= s
->mac_reg
[TOTL
];
386 if ((s
->mac_reg
[TOTL
] += s
->tx
.size
) < n
)
391 process_tx_desc(E1000State
*s
, struct e1000_tx_desc
*dp
)
393 uint32_t txd_lower
= le32_to_cpu(dp
->lower
.data
);
394 uint32_t dtype
= txd_lower
& (E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
);
395 unsigned int split_size
= txd_lower
& 0xffff, bytes
, sz
, op
;
396 unsigned int msh
= 0xfffff, hdr
= 0;
398 struct e1000_context_desc
*xp
= (struct e1000_context_desc
*)dp
;
399 struct e1000_tx
*tp
= &s
->tx
;
401 if (dtype
== E1000_TXD_CMD_DEXT
) { // context descriptor
402 op
= le32_to_cpu(xp
->cmd_and_length
);
403 tp
->ipcss
= xp
->lower_setup
.ip_fields
.ipcss
;
404 tp
->ipcso
= xp
->lower_setup
.ip_fields
.ipcso
;
405 tp
->ipcse
= le16_to_cpu(xp
->lower_setup
.ip_fields
.ipcse
);
406 tp
->tucss
= xp
->upper_setup
.tcp_fields
.tucss
;
407 tp
->tucso
= xp
->upper_setup
.tcp_fields
.tucso
;
408 tp
->tucse
= le16_to_cpu(xp
->upper_setup
.tcp_fields
.tucse
);
409 tp
->paylen
= op
& 0xfffff;
410 tp
->hdr_len
= xp
->tcp_seg_setup
.fields
.hdr_len
;
411 tp
->mss
= le16_to_cpu(xp
->tcp_seg_setup
.fields
.mss
);
412 tp
->ip
= (op
& E1000_TXD_CMD_IP
) ? 1 : 0;
413 tp
->tcp
= (op
& E1000_TXD_CMD_TCP
) ? 1 : 0;
414 tp
->tse
= (op
& E1000_TXD_CMD_TSE
) ? 1 : 0;
416 if (tp
->tucso
== 0) { // this is probably wrong
417 DBGOUT(TXSUM
, "TCP/UDP: cso 0!\n");
418 tp
->tucso
= tp
->tucss
+ (tp
->tcp
? 16 : 6);
421 } else if (dtype
== (E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
)) {
423 tp
->sum_needed
= le32_to_cpu(dp
->upper
.data
) >> 8;
424 tp
->cptse
= ( txd_lower
& E1000_TXD_CMD_TSE
) ? 1 : 0;
429 if (vlan_enabled(s
) && is_vlan_txd(txd_lower
) &&
430 (tp
->cptse
|| txd_lower
& E1000_TXD_CMD_EOP
)) {
432 cpu_to_be16wu((uint16_t *)(tp
->vlan_header
),
433 le16_to_cpup((uint16_t *)(s
->mac_reg
+ VET
)));
434 cpu_to_be16wu((uint16_t *)(tp
->vlan_header
+ 2),
435 le16_to_cpu(dp
->upper
.fields
.special
));
438 addr
= le64_to_cpu(dp
->buffer_addr
);
439 if (tp
->tse
&& tp
->cptse
) {
444 if (tp
->size
+ bytes
> msh
)
445 bytes
= msh
- tp
->size
;
446 cpu_physical_memory_read(addr
, tp
->data
+ tp
->size
, bytes
);
447 if ((sz
= tp
->size
+ bytes
) >= hdr
&& tp
->size
< hdr
)
448 memmove(tp
->header
, tp
->data
, hdr
);
453 memmove(tp
->data
, tp
->header
, hdr
);
456 } while (split_size
-= bytes
);
457 } else if (!tp
->tse
&& tp
->cptse
) {
458 // context descriptor TSE is not set, while data descriptor TSE is set
459 DBGOUT(TXERR
, "TCP segmentaion Error\n");
461 cpu_physical_memory_read(addr
, tp
->data
+ tp
->size
, split_size
);
462 tp
->size
+= split_size
;
465 if (!(txd_lower
& E1000_TXD_CMD_EOP
))
467 if (!(tp
->tse
&& tp
->cptse
&& tp
->size
< hdr
))
477 txdesc_writeback(target_phys_addr_t base
, struct e1000_tx_desc
*dp
)
479 uint32_t txd_upper
, txd_lower
= le32_to_cpu(dp
->lower
.data
);
481 if (!(txd_lower
& (E1000_TXD_CMD_RS
|E1000_TXD_CMD_RPS
)))
483 txd_upper
= (le32_to_cpu(dp
->upper
.data
) | E1000_TXD_STAT_DD
) &
484 ~(E1000_TXD_STAT_EC
| E1000_TXD_STAT_LC
| E1000_TXD_STAT_TU
);
485 dp
->upper
.data
= cpu_to_le32(txd_upper
);
486 cpu_physical_memory_write(base
+ ((char *)&dp
->upper
- (char *)dp
),
487 (void *)&dp
->upper
, sizeof(dp
->upper
));
488 return E1000_ICR_TXDW
;
492 start_xmit(E1000State
*s
)
494 target_phys_addr_t base
;
495 struct e1000_tx_desc desc
;
496 uint32_t tdh_start
= s
->mac_reg
[TDH
], cause
= E1000_ICS_TXQE
;
498 if (!(s
->mac_reg
[TCTL
] & E1000_TCTL_EN
)) {
499 DBGOUT(TX
, "tx disabled\n");
503 while (s
->mac_reg
[TDH
] != s
->mac_reg
[TDT
]) {
504 base
= ((uint64_t)s
->mac_reg
[TDBAH
] << 32) + s
->mac_reg
[TDBAL
] +
505 sizeof(struct e1000_tx_desc
) * s
->mac_reg
[TDH
];
506 cpu_physical_memory_read(base
, (void *)&desc
, sizeof(desc
));
508 DBGOUT(TX
, "index %d: %p : %x %x\n", s
->mac_reg
[TDH
],
509 (void *)(intptr_t)desc
.buffer_addr
, desc
.lower
.data
,
512 process_tx_desc(s
, &desc
);
513 cause
|= txdesc_writeback(base
, &desc
);
515 if (++s
->mac_reg
[TDH
] * sizeof(desc
) >= s
->mac_reg
[TDLEN
])
518 * the following could happen only if guest sw assigns
519 * bogus values to TDT/TDLEN.
520 * there's nothing too intelligent we could do about this.
522 if (s
->mac_reg
[TDH
] == tdh_start
) {
523 DBGOUT(TXERR
, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
524 tdh_start
, s
->mac_reg
[TDT
], s
->mac_reg
[TDLEN
]);
528 set_ics(s
, 0, cause
);
532 receive_filter(E1000State
*s
, const uint8_t *buf
, int size
)
534 static uint8_t bcast
[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
535 static int mta_shift
[] = {4, 3, 2, 0};
536 uint32_t f
, rctl
= s
->mac_reg
[RCTL
], ra
[2], *rp
;
538 if (is_vlan_packet(s
, buf
) && vlan_rx_filter_enabled(s
)) {
539 uint16_t vid
= be16_to_cpup((uint16_t *)(buf
+ 14));
540 uint32_t vfta
= le32_to_cpup((uint32_t *)(s
->mac_reg
+ VFTA
) +
541 ((vid
>> 5) & 0x7f));
542 if ((vfta
& (1 << (vid
& 0x1f))) == 0)
546 if (rctl
& E1000_RCTL_UPE
) // promiscuous
549 if ((buf
[0] & 1) && (rctl
& E1000_RCTL_MPE
)) // promiscuous mcast
552 if ((rctl
& E1000_RCTL_BAM
) && !memcmp(buf
, bcast
, sizeof bcast
))
555 for (rp
= s
->mac_reg
+ RA
; rp
< s
->mac_reg
+ RA
+ 32; rp
+= 2) {
556 if (!(rp
[1] & E1000_RAH_AV
))
558 ra
[0] = cpu_to_le32(rp
[0]);
559 ra
[1] = cpu_to_le32(rp
[1]);
560 if (!memcmp(buf
, (uint8_t *)ra
, 6)) {
562 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
563 (int)(rp
- s
->mac_reg
- RA
)/2,
564 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5]);
568 DBGOUT(RXFILTER
, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
569 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5]);
571 f
= mta_shift
[(rctl
>> E1000_RCTL_MO_SHIFT
) & 3];
572 f
= (((buf
[5] << 8) | buf
[4]) >> f
) & 0xfff;
573 if (s
->mac_reg
[MTA
+ (f
>> 5)] & (1 << (f
& 0x1f)))
576 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
577 buf
[0], buf
[1], buf
[2], buf
[3], buf
[4], buf
[5],
578 (rctl
>> E1000_RCTL_MO_SHIFT
) & 3, f
>> 5,
579 s
->mac_reg
[MTA
+ (f
>> 5)]);
585 e1000_set_link_status(VLANClientState
*vc
)
587 E1000State
*s
= vc
->opaque
;
588 uint32_t old_status
= s
->mac_reg
[STATUS
];
591 s
->mac_reg
[STATUS
] &= ~E1000_STATUS_LU
;
593 s
->mac_reg
[STATUS
] |= E1000_STATUS_LU
;
595 if (s
->mac_reg
[STATUS
] != old_status
)
596 set_ics(s
, 0, E1000_ICR_LSC
);
600 e1000_can_receive(void *opaque
)
602 E1000State
*s
= opaque
;
604 return (s
->mac_reg
[RCTL
] & E1000_RCTL_EN
);
608 e1000_receive(void *opaque
, const uint8_t *buf
, int size
)
610 E1000State
*s
= opaque
;
611 struct e1000_rx_desc desc
;
612 target_phys_addr_t base
;
615 uint16_t vlan_special
= 0;
616 uint8_t vlan_status
= 0, vlan_offset
= 0;
618 if (!(s
->mac_reg
[RCTL
] & E1000_RCTL_EN
))
621 if (size
> s
->rxbuf_size
) {
622 DBGOUT(RX
, "packet too large for buffers (%d > %d)\n", size
,
627 if (!receive_filter(s
, buf
, size
))
630 if (vlan_enabled(s
) && is_vlan_packet(s
, buf
)) {
631 vlan_special
= cpu_to_le16(be16_to_cpup((uint16_t *)(buf
+ 14)));
632 memmove((void *)(buf
+ 4), buf
, 12);
633 vlan_status
= E1000_RXD_STAT_VP
;
638 rdh_start
= s
->mac_reg
[RDH
];
639 size
+= 4; // for the header
641 if (s
->mac_reg
[RDH
] == s
->mac_reg
[RDT
] && s
->check_rxov
) {
642 set_ics(s
, 0, E1000_ICS_RXO
);
645 base
= ((uint64_t)s
->mac_reg
[RDBAH
] << 32) + s
->mac_reg
[RDBAL
] +
646 sizeof(desc
) * s
->mac_reg
[RDH
];
647 cpu_physical_memory_read(base
, (void *)&desc
, sizeof(desc
));
648 desc
.special
= vlan_special
;
649 desc
.status
|= (vlan_status
| E1000_RXD_STAT_DD
);
650 if (desc
.buffer_addr
) {
651 cpu_physical_memory_write(le64_to_cpu(desc
.buffer_addr
),
652 (void *)(buf
+ vlan_offset
), size
);
653 desc
.length
= cpu_to_le16(size
);
654 desc
.status
|= E1000_RXD_STAT_EOP
|E1000_RXD_STAT_IXSM
;
655 } else // as per intel docs; skip descriptors with null buf addr
656 DBGOUT(RX
, "Null RX descriptor!!\n");
657 cpu_physical_memory_write(base
, (void *)&desc
, sizeof(desc
));
659 if (++s
->mac_reg
[RDH
] * sizeof(desc
) >= s
->mac_reg
[RDLEN
])
662 /* see comment in start_xmit; same here */
663 if (s
->mac_reg
[RDH
] == rdh_start
) {
664 DBGOUT(RXERR
, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
665 rdh_start
, s
->mac_reg
[RDT
], s
->mac_reg
[RDLEN
]);
666 set_ics(s
, 0, E1000_ICS_RXO
);
669 } while (desc
.buffer_addr
== 0);
673 n
= s
->mac_reg
[TORL
];
674 if ((s
->mac_reg
[TORL
] += size
) < n
)
678 if ((rdt
= s
->mac_reg
[RDT
]) < s
->mac_reg
[RDH
])
679 rdt
+= s
->mac_reg
[RDLEN
] / sizeof(desc
);
680 if (((rdt
- s
->mac_reg
[RDH
]) * sizeof(desc
)) <= s
->mac_reg
[RDLEN
] >>
682 n
|= E1000_ICS_RXDMT0
;
688 mac_readreg(E1000State
*s
, int index
)
690 return s
->mac_reg
[index
];
694 mac_icr_read(E1000State
*s
, int index
)
696 uint32_t ret
= s
->mac_reg
[ICR
];
698 DBGOUT(INTERRUPT
, "ICR read: %x\n", ret
);
699 set_interrupt_cause(s
, 0, 0);
704 mac_read_clr4(E1000State
*s
, int index
)
706 uint32_t ret
= s
->mac_reg
[index
];
708 s
->mac_reg
[index
] = 0;
713 mac_read_clr8(E1000State
*s
, int index
)
715 uint32_t ret
= s
->mac_reg
[index
];
717 s
->mac_reg
[index
] = 0;
718 s
->mac_reg
[index
-1] = 0;
723 mac_writereg(E1000State
*s
, int index
, uint32_t val
)
725 s
->mac_reg
[index
] = val
;
729 set_rdt(E1000State
*s
, int index
, uint32_t val
)
732 s
->mac_reg
[index
] = val
& 0xffff;
736 set_16bit(E1000State
*s
, int index
, uint32_t val
)
738 s
->mac_reg
[index
] = val
& 0xffff;
742 set_dlen(E1000State
*s
, int index
, uint32_t val
)
744 s
->mac_reg
[index
] = val
& 0xfff80;
748 set_tctl(E1000State
*s
, int index
, uint32_t val
)
750 s
->mac_reg
[index
] = val
;
751 s
->mac_reg
[TDT
] &= 0xffff;
756 set_icr(E1000State
*s
, int index
, uint32_t val
)
758 DBGOUT(INTERRUPT
, "set_icr %x\n", val
);
759 set_interrupt_cause(s
, 0, s
->mac_reg
[ICR
] & ~val
);
763 set_imc(E1000State
*s
, int index
, uint32_t val
)
765 s
->mac_reg
[IMS
] &= ~val
;
770 set_ims(E1000State
*s
, int index
, uint32_t val
)
772 s
->mac_reg
[IMS
] |= val
;
776 #define getreg(x) [x] = mac_readreg
777 static uint32_t (*macreg_readops
[])(E1000State
*, int) = {
778 getreg(PBA
), getreg(RCTL
), getreg(TDH
), getreg(TXDCTL
),
779 getreg(WUFC
), getreg(TDT
), getreg(CTRL
), getreg(LEDCTL
),
780 getreg(MANC
), getreg(MDIC
), getreg(SWSM
), getreg(STATUS
),
781 getreg(TORL
), getreg(TOTL
), getreg(IMS
), getreg(TCTL
),
782 getreg(RDH
), getreg(RDT
), getreg(VET
),
784 [TOTH
] = mac_read_clr8
, [TORH
] = mac_read_clr8
, [GPRC
] = mac_read_clr4
,
785 [GPTC
] = mac_read_clr4
, [TPR
] = mac_read_clr4
, [TPT
] = mac_read_clr4
,
786 [ICR
] = mac_icr_read
, [EECD
] = get_eecd
, [EERD
] = flash_eerd_read
,
787 [CRCERRS
... MPC
] = &mac_readreg
,
788 [RA
... RA
+31] = &mac_readreg
,
789 [MTA
... MTA
+127] = &mac_readreg
,
790 [VFTA
... VFTA
+127] = &mac_readreg
,
792 enum { NREADOPS
= ARRAY_SIZE(macreg_readops
) };
794 #define putreg(x) [x] = mac_writereg
795 static void (*macreg_writeops
[])(E1000State
*, int, uint32_t) = {
796 putreg(PBA
), putreg(EERD
), putreg(SWSM
), putreg(WUFC
),
797 putreg(TDBAL
), putreg(TDBAH
), putreg(TXDCTL
), putreg(RDBAH
),
798 putreg(RDBAL
), putreg(LEDCTL
), putreg(VET
),
799 [TDLEN
] = set_dlen
, [RDLEN
] = set_dlen
, [TCTL
] = set_tctl
,
800 [TDT
] = set_tctl
, [MDIC
] = set_mdic
, [ICS
] = set_ics
,
801 [TDH
] = set_16bit
, [RDH
] = set_16bit
, [RDT
] = set_rdt
,
802 [IMC
] = set_imc
, [IMS
] = set_ims
, [ICR
] = set_icr
,
803 [EECD
] = set_eecd
, [RCTL
] = set_rx_control
, [CTRL
] = set_ctrl
,
804 [RA
... RA
+31] = &mac_writereg
,
805 [MTA
... MTA
+127] = &mac_writereg
,
806 [VFTA
... VFTA
+127] = &mac_writereg
,
808 enum { NWRITEOPS
= ARRAY_SIZE(macreg_writeops
) };
811 e1000_mmio_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
813 E1000State
*s
= opaque
;
814 unsigned int index
= (addr
& 0x1ffff) >> 2;
816 #ifdef TARGET_WORDS_BIGENDIAN
819 if (index
< NWRITEOPS
&& macreg_writeops
[index
])
820 macreg_writeops
[index
](s
, index
, val
);
821 else if (index
< NREADOPS
&& macreg_readops
[index
])
822 DBGOUT(MMIO
, "e1000_mmio_writel RO %x: 0x%04x\n", index
<<2, val
);
824 DBGOUT(UNKNOWN
, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
829 e1000_mmio_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
831 // emulate hw without byte enables: no RMW
832 e1000_mmio_writel(opaque
, addr
& ~3,
833 (val
& 0xffff) << (8*(addr
& 3)));
837 e1000_mmio_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
839 // emulate hw without byte enables: no RMW
840 e1000_mmio_writel(opaque
, addr
& ~3,
841 (val
& 0xff) << (8*(addr
& 3)));
845 e1000_mmio_readl(void *opaque
, target_phys_addr_t addr
)
847 E1000State
*s
= opaque
;
848 unsigned int index
= (addr
& 0x1ffff) >> 2;
850 if (index
< NREADOPS
&& macreg_readops
[index
])
852 uint32_t val
= macreg_readops
[index
](s
, index
);
853 #ifdef TARGET_WORDS_BIGENDIAN
858 DBGOUT(UNKNOWN
, "MMIO unknown read addr=0x%08x\n", index
<<2);
863 e1000_mmio_readb(void *opaque
, target_phys_addr_t addr
)
865 return ((e1000_mmio_readl(opaque
, addr
& ~3)) >>
866 (8 * (addr
& 3))) & 0xff;
870 e1000_mmio_readw(void *opaque
, target_phys_addr_t addr
)
872 return ((e1000_mmio_readl(opaque
, addr
& ~3)) >>
873 (8 * (addr
& 3))) & 0xffff;
876 static const int mac_regtosave
[] = {
877 CTRL
, EECD
, EERD
, GPRC
, GPTC
, ICR
, ICS
, IMC
, IMS
,
878 LEDCTL
, MANC
, MDIC
, MPC
, PBA
, RCTL
, RDBAH
, RDBAL
, RDH
,
879 RDLEN
, RDT
, STATUS
, SWSM
, TCTL
, TDBAH
, TDBAL
, TDH
, TDLEN
,
880 TDT
, TORH
, TORL
, TOTH
, TOTL
, TPR
, TPT
, TXDCTL
, WUFC
,
883 enum { MAC_NSAVE
= ARRAY_SIZE(mac_regtosave
) };
885 static const struct {
888 } mac_regarraystosave
[] = { {32, RA
}, {128, MTA
}, {128, VFTA
} };
889 enum { MAC_NARRAYS
= ARRAY_SIZE(mac_regarraystosave
) };
892 nic_save(QEMUFile
*f
, void *opaque
)
894 E1000State
*s
= (E1000State
*)opaque
;
897 pci_device_save(&s
->dev
, f
);
899 qemu_put_be32s(f
, &s
->rxbuf_size
);
900 qemu_put_be32s(f
, &s
->rxbuf_min_shift
);
901 qemu_put_be32s(f
, &s
->eecd_state
.val_in
);
902 qemu_put_be16s(f
, &s
->eecd_state
.bitnum_in
);
903 qemu_put_be16s(f
, &s
->eecd_state
.bitnum_out
);
904 qemu_put_be16s(f
, &s
->eecd_state
.reading
);
905 qemu_put_be32s(f
, &s
->eecd_state
.old_eecd
);
906 qemu_put_8s(f
, &s
->tx
.ipcss
);
907 qemu_put_8s(f
, &s
->tx
.ipcso
);
908 qemu_put_be16s(f
, &s
->tx
.ipcse
);
909 qemu_put_8s(f
, &s
->tx
.tucss
);
910 qemu_put_8s(f
, &s
->tx
.tucso
);
911 qemu_put_be16s(f
, &s
->tx
.tucse
);
912 qemu_put_be32s(f
, &s
->tx
.paylen
);
913 qemu_put_8s(f
, &s
->tx
.hdr_len
);
914 qemu_put_be16s(f
, &s
->tx
.mss
);
915 qemu_put_be16s(f
, &s
->tx
.size
);
916 qemu_put_be16s(f
, &s
->tx
.tso_frames
);
917 qemu_put_8s(f
, &s
->tx
.sum_needed
);
918 qemu_put_s8s(f
, &s
->tx
.ip
);
919 qemu_put_s8s(f
, &s
->tx
.tcp
);
920 qemu_put_buffer(f
, s
->tx
.header
, sizeof s
->tx
.header
);
921 qemu_put_buffer(f
, s
->tx
.data
, sizeof s
->tx
.data
);
922 for (i
= 0; i
< 64; i
++)
923 qemu_put_be16s(f
, s
->eeprom_data
+ i
);
924 for (i
= 0; i
< 0x20; i
++)
925 qemu_put_be16s(f
, s
->phy_reg
+ i
);
926 for (i
= 0; i
< MAC_NSAVE
; i
++)
927 qemu_put_be32s(f
, s
->mac_reg
+ mac_regtosave
[i
]);
928 for (i
= 0; i
< MAC_NARRAYS
; i
++)
929 for (j
= 0; j
< mac_regarraystosave
[i
].size
; j
++)
931 s
->mac_reg
+ mac_regarraystosave
[i
].array0
+ j
);
935 nic_load(QEMUFile
*f
, void *opaque
, int version_id
)
937 E1000State
*s
= (E1000State
*)opaque
;
940 if ((ret
= pci_device_load(&s
->dev
, f
)) < 0)
943 qemu_get_sbe32s(f
, &i
); /* once some unused instance id */
944 qemu_get_be32(f
); /* Ignored. Was mmio_base. */
945 qemu_get_be32s(f
, &s
->rxbuf_size
);
946 qemu_get_be32s(f
, &s
->rxbuf_min_shift
);
947 qemu_get_be32s(f
, &s
->eecd_state
.val_in
);
948 qemu_get_be16s(f
, &s
->eecd_state
.bitnum_in
);
949 qemu_get_be16s(f
, &s
->eecd_state
.bitnum_out
);
950 qemu_get_be16s(f
, &s
->eecd_state
.reading
);
951 qemu_get_be32s(f
, &s
->eecd_state
.old_eecd
);
952 qemu_get_8s(f
, &s
->tx
.ipcss
);
953 qemu_get_8s(f
, &s
->tx
.ipcso
);
954 qemu_get_be16s(f
, &s
->tx
.ipcse
);
955 qemu_get_8s(f
, &s
->tx
.tucss
);
956 qemu_get_8s(f
, &s
->tx
.tucso
);
957 qemu_get_be16s(f
, &s
->tx
.tucse
);
958 qemu_get_be32s(f
, &s
->tx
.paylen
);
959 qemu_get_8s(f
, &s
->tx
.hdr_len
);
960 qemu_get_be16s(f
, &s
->tx
.mss
);
961 qemu_get_be16s(f
, &s
->tx
.size
);
962 qemu_get_be16s(f
, &s
->tx
.tso_frames
);
963 qemu_get_8s(f
, &s
->tx
.sum_needed
);
964 qemu_get_s8s(f
, &s
->tx
.ip
);
965 qemu_get_s8s(f
, &s
->tx
.tcp
);
966 qemu_get_buffer(f
, s
->tx
.header
, sizeof s
->tx
.header
);
967 qemu_get_buffer(f
, s
->tx
.data
, sizeof s
->tx
.data
);
968 for (i
= 0; i
< 64; i
++)
969 qemu_get_be16s(f
, s
->eeprom_data
+ i
);
970 for (i
= 0; i
< 0x20; i
++)
971 qemu_get_be16s(f
, s
->phy_reg
+ i
);
972 for (i
= 0; i
< MAC_NSAVE
; i
++)
973 qemu_get_be32s(f
, s
->mac_reg
+ mac_regtosave
[i
]);
974 for (i
= 0; i
< MAC_NARRAYS
; i
++)
975 for (j
= 0; j
< mac_regarraystosave
[i
].size
; j
++)
977 s
->mac_reg
+ mac_regarraystosave
[i
].array0
+ j
);
981 static const uint16_t e1000_eeprom_template
[64] = {
982 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
983 0x3000, 0x1000, 0x6403, E1000_DEVID
, 0x8086, E1000_DEVID
, 0x8086, 0x3040,
984 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
985 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
986 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
987 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
988 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
989 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
992 static const uint16_t phy_reg_init
[] = {
993 [PHY_CTRL
] = 0x1140, [PHY_STATUS
] = 0x796d, // link initially up
994 [PHY_ID1
] = 0x141, [PHY_ID2
] = PHY_ID2_INIT
,
995 [PHY_1000T_CTRL
] = 0x0e00, [M88E1000_PHY_SPEC_CTRL
] = 0x360,
996 [M88E1000_EXT_PHY_SPEC_CTRL
] = 0x0d60, [PHY_AUTONEG_ADV
] = 0xde1,
997 [PHY_LP_ABILITY
] = 0x1e0, [PHY_1000T_STATUS
] = 0x3c00,
998 [M88E1000_PHY_SPEC_STATUS
] = 0xac00,
1001 static const uint32_t mac_reg_init
[] = {
1004 [CTRL
] = E1000_CTRL_SWDPIN2
| E1000_CTRL_SWDPIN0
|
1005 E1000_CTRL_SPD_1000
| E1000_CTRL_SLU
,
1006 [STATUS
] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE
|
1007 E1000_STATUS_ASDV
| E1000_STATUS_MTXCKOK
|
1008 E1000_STATUS_SPEED_1000
| E1000_STATUS_FD
|
1010 [MANC
] = E1000_MANC_EN_MNG2HOST
| E1000_MANC_RCV_TCO_EN
|
1011 E1000_MANC_ARP_EN
| E1000_MANC_0298_EN
|
1017 static CPUWriteMemoryFunc
*e1000_mmio_write
[] = {
1018 e1000_mmio_writeb
, e1000_mmio_writew
, e1000_mmio_writel
1021 static CPUReadMemoryFunc
*e1000_mmio_read
[] = {
1022 e1000_mmio_readb
, e1000_mmio_readw
, e1000_mmio_readl
1026 e1000_mmio_map(PCIDevice
*pci_dev
, int region_num
,
1027 uint32_t addr
, uint32_t size
, int type
)
1029 E1000State
*d
= (E1000State
*)pci_dev
;
1031 const uint32_t excluded_regs
[] = {
1032 E1000_MDIC
, E1000_ICR
, E1000_ICS
, E1000_IMS
,
1033 E1000_IMC
, E1000_TCTL
, E1000_TDT
, PNPMMIO_SIZE
1037 DBGOUT(MMIO
, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr
, size
);
1039 cpu_register_physical_memory(addr
, PNPMMIO_SIZE
, d
->mmio_index
);
1040 qemu_register_coalesced_mmio(addr
, excluded_regs
[0]);
1042 for (i
= 0; excluded_regs
[i
] != PNPMMIO_SIZE
; i
++)
1043 qemu_register_coalesced_mmio(addr
+ excluded_regs
[i
] + 4,
1044 excluded_regs
[i
+ 1] -
1045 excluded_regs
[i
] - 4);
1049 e1000_cleanup(VLANClientState
*vc
)
1051 E1000State
*d
= vc
->opaque
;
1053 unregister_savevm("e1000", d
);
1057 pci_e1000_uninit(PCIDevice
*dev
)
1059 E1000State
*d
= (E1000State
*) dev
;
1061 cpu_unregister_io_memory(d
->mmio_index
);
1067 pci_e1000_init(PCIBus
*bus
, NICInfo
*nd
, int devfn
)
1071 uint16_t checksum
= 0;
1072 static const char info_str
[] = "e1000";
1075 d
= (E1000State
*)pci_register_device(bus
, "e1000",
1076 sizeof(E1000State
), devfn
, NULL
, NULL
);
1081 pci_conf
= d
->dev
.config
;
1083 pci_config_set_vendor_id(pci_conf
, PCI_VENDOR_ID_INTEL
);
1084 pci_config_set_device_id(pci_conf
, E1000_DEVID
);
1085 *(uint16_t *)(pci_conf
+0x04) = cpu_to_le16(0x0407);
1086 *(uint16_t *)(pci_conf
+0x06) = cpu_to_le16(0x0010);
1087 pci_conf
[0x08] = 0x03;
1088 pci_config_set_class(pci_conf
, PCI_CLASS_NETWORK_ETHERNET
);
1089 pci_conf
[0x0c] = 0x10;
1091 pci_conf
[0x3d] = 1; // interrupt pin 0
1093 d
->mmio_index
= cpu_register_io_memory(0, e1000_mmio_read
,
1094 e1000_mmio_write
, d
);
1096 pci_register_io_region((PCIDevice
*)d
, 0, PNPMMIO_SIZE
,
1097 PCI_ADDRESS_SPACE_MEM
, e1000_mmio_map
);
1099 pci_register_io_region((PCIDevice
*)d
, 1, IOPORT_SIZE
,
1100 PCI_ADDRESS_SPACE_IO
, ioport_map
);
1102 memmove(d
->eeprom_data
, e1000_eeprom_template
,
1103 sizeof e1000_eeprom_template
);
1104 for (i
= 0; i
< 3; i
++)
1105 d
->eeprom_data
[i
] = (nd
->macaddr
[2*i
+1]<<8) | nd
->macaddr
[2*i
];
1106 for (i
= 0; i
< EEPROM_CHECKSUM_REG
; i
++)
1107 checksum
+= d
->eeprom_data
[i
];
1108 checksum
= (uint16_t) EEPROM_SUM
- checksum
;
1109 d
->eeprom_data
[EEPROM_CHECKSUM_REG
] = checksum
;
1111 memset(d
->phy_reg
, 0, sizeof d
->phy_reg
);
1112 memmove(d
->phy_reg
, phy_reg_init
, sizeof phy_reg_init
);
1113 memset(d
->mac_reg
, 0, sizeof d
->mac_reg
);
1114 memmove(d
->mac_reg
, mac_reg_init
, sizeof mac_reg_init
);
1115 d
->rxbuf_min_shift
= 1;
1116 memset(&d
->tx
, 0, sizeof d
->tx
);
1118 d
->vc
= qemu_new_vlan_client(nd
->vlan
, nd
->model
, nd
->name
,
1119 e1000_receive
, e1000_can_receive
,
1121 d
->vc
->link_status_changed
= e1000_set_link_status
;
1123 qemu_format_nic_info_str(d
->vc
, nd
->macaddr
);
1125 register_savevm(info_str
, -1, 2, nic_save
, nic_load
, d
);
1126 d
->dev
.unregister
= pci_e1000_uninit
;
1128 return (PCIDevice
*)d
;