[qemu.git] / hw / e1000.c
1 /*
2 * QEMU e1000 emulation
3 *
4 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
5 * Copyright (c) 2008 Qumranet
6 * Based on work done by:
7 * Copyright (c) 2007 Dan Aloni
8 * Copyright (c) 2004 Antony T Curtis
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
23 */
24
25
26 #include "hw.h"
27 #include "pci.h"
28 #include "net.h"
29
30 #include "e1000_hw.h"
31
32 #define DEBUG
33
34 #ifdef DEBUG
35 enum {
36 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
37 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
38 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
39 DEBUG_RXFILTER, DEBUG_NOTYET,
40 };
41 #define DBGBIT(x) (1<<DEBUG_##x)
42 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
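/*
 * Only the TXERR and GENERAL categories are printed by default.  As an
 * illustration (not part of the original code), tracing EEPROM and MDIC
 * accesses as well would just mean widening the default mask:
 *
 *     static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL) |
 *                             DBGBIT(EEPROM) | DBGBIT(MDIC);
 */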
43
44 #define DBGOUT(what, fmt, params...) do { \
45 if (debugflags & DBGBIT(what)) \
46 fprintf(stderr, "e1000: " fmt, ##params); \
47 } while (0)
48 #else
49 #define DBGOUT(what, fmt, params...) do {} while (0)
50 #endif
51
52 #define IOPORT_SIZE 0x40
53 #define PNPMMIO_SIZE 0x20000
54
55 /*
56 * HW models:
57 * E1000_DEV_ID_82540EM works with Windows and Linux
58  *  E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
59 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
60 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
61 * Others never tested
62 */
63 enum { E1000_DEVID = E1000_DEV_ID_82540EM };
64
65 /*
66 * May need to specify additional MAC-to-PHY entries --
67 * Intel's Windows driver refuses to initialize unless they match
68 */
69 enum {
70 PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
71 E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
72 /* default to E1000_DEV_ID_82540EM */ 0xc20
73 };
74
75 typedef struct E1000State_st {
76 PCIDevice dev;
77 VLANClientState *vc;
78 int mmio_index;
79
80 uint32_t mac_reg[0x8000];
81 uint16_t phy_reg[0x20];
82 uint16_t eeprom_data[64];
83
84 uint32_t rxbuf_size;
85 uint32_t rxbuf_min_shift;
86 int check_rxov;
87 struct e1000_tx {
88 unsigned char header[256];
89 unsigned char vlan_header[4];
90 unsigned char vlan[4];
91 unsigned char data[0x10000];
92 uint16_t size;
93 unsigned char sum_needed;
94 unsigned char vlan_needed;
95 uint8_t ipcss;
96 uint8_t ipcso;
97 uint16_t ipcse;
98 uint8_t tucss;
99 uint8_t tucso;
100 uint16_t tucse;
101 uint8_t hdr_len;
102 uint16_t mss;
103 uint32_t paylen;
104 uint16_t tso_frames;
105 char tse;
106 int8_t ip;
107 int8_t tcp;
108 char cptse; // current packet tse bit
109 } tx;
110
111 struct {
112 uint32_t val_in; // shifted in from guest driver
113 uint16_t bitnum_in;
114 uint16_t bitnum_out;
115 uint16_t reading;
116 uint32_t old_eecd;
117 } eecd_state;
118 } E1000State;
119
120 #define defreg(x) x = (E1000_##x>>2)
121 enum {
122 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
123 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
124 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
125 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
126 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
127 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
128 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
129 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
130 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
131 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
132 defreg(VET),
133 };
134
135 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
136 static const char phy_regcap[0x20] = {
137 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
138 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
139 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
140 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
141 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
142 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
143 };
144
145 static void
146 ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
147 uint32_t size, int type)
148 {
149 DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
150 }
151
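/*
 * Recompute the interrupt state: any non-zero cause also gets the
 * "interrupt asserted" bit, the value is mirrored into ICR and ICS, and
 * the PCI IRQ line follows ICR masked by IMS.
 */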
152 static void
153 set_interrupt_cause(E1000State *s, int index, uint32_t val)
154 {
155 if (val)
156 val |= E1000_ICR_INT_ASSERTED;
157 s->mac_reg[ICR] = val;
158 s->mac_reg[ICS] = val;
159 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
160 }
161
162 static void
163 set_ics(E1000State *s, int index, uint32_t val)
164 {
165 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
166 s->mac_reg[IMS]);
167 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
168 }
169
170 static int
171 rxbufsize(uint32_t v)
172 {
173 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
174 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
175 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
176 switch (v) {
177 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
178 return 16384;
179 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
180 return 8192;
181 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
182 return 4096;
183 case E1000_RCTL_SZ_1024:
184 return 1024;
185 case E1000_RCTL_SZ_512:
186 return 512;
187 case E1000_RCTL_SZ_256:
188 return 256;
189 }
190 return 2048;
191 }
192
193 static void
194 set_ctrl(E1000State *s, int index, uint32_t val)
195 {
196 /* RST is self clearing */
197 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
198 }
199
200 static void
201 set_rx_control(E1000State *s, int index, uint32_t val)
202 {
203 s->mac_reg[RCTL] = val;
204 s->rxbuf_size = rxbufsize(val);
205 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
206 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
207 s->mac_reg[RCTL]);
208 }
209
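/*
 * MDIC gives the guest MDIO access to the emulated PHY (address 1).
 * Accesses are validated against phy_regcap[]; an unknown register or a
 * wrong PHY address reports MDIC_ERROR.  Completion is signalled by the
 * READY bit plus an MDAC interrupt.
 */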
210 static void
211 set_mdic(E1000State *s, int index, uint32_t val)
212 {
213 uint32_t data = val & E1000_MDIC_DATA_MASK;
214 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
215
216 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
217 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
218 else if (val & E1000_MDIC_OP_READ) {
219 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
220 if (!(phy_regcap[addr] & PHY_R)) {
221 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
222 val |= E1000_MDIC_ERROR;
223 } else
224 val = (val ^ data) | s->phy_reg[addr];
225 } else if (val & E1000_MDIC_OP_WRITE) {
226 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
227 if (!(phy_regcap[addr] & PHY_W)) {
228 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
229 val |= E1000_MDIC_ERROR;
230 } else
231 s->phy_reg[addr] = data;
232 }
233 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
234 set_ics(s, 0, E1000_ICR_MDAC);
235 }
236
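/*
 * EECD models the Microwire-style serial EEPROM interface: the guest
 * bit-bangs SK/CS/DI and samples DO.  get_eecd() returns the current
 * output bit, set_eecd() below shifts the opcode and address in on rising
 * SK edges and advances the output bit counter on falling edges.
 */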
237 static uint32_t
238 get_eecd(E1000State *s, int index)
239 {
240 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
241
242 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
243 s->eecd_state.bitnum_out, s->eecd_state.reading);
244 if (!s->eecd_state.reading ||
245 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
246 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
247 ret |= E1000_EECD_DO;
248 return ret;
249 }
250
251 static void
252 set_eecd(E1000State *s, int index, uint32_t val)
253 {
254 uint32_t oldval = s->eecd_state.old_eecd;
255
256 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
257 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
258 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
259 return;
260 if (!(E1000_EECD_SK & val)) { // falling edge
261 s->eecd_state.bitnum_out++;
262 return;
263 }
264 if (!(val & E1000_EECD_CS)) { // rising, no CS (EEPROM reset)
265 memset(&s->eecd_state, 0, sizeof s->eecd_state);
266 /*
267 * restore old_eecd's E1000_EECD_SK (known to be on)
268 * to avoid false detection of a clock edge
269 */
270 s->eecd_state.old_eecd = E1000_EECD_SK;
271 return;
272 }
273 s->eecd_state.val_in <<= 1;
274 if (val & E1000_EECD_DI)
275 s->eecd_state.val_in |= 1;
276 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
277 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
278 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
279 EEPROM_READ_OPCODE_MICROWIRE);
280 }
281 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
282 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
283 s->eecd_state.reading);
284 }
285
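/*
 * EERD provides the register-based EEPROM read path: after the guest
 * writes an address with the START bit set, reading EERD returns the
 * selected word together with the DONE flag; out-of-range addresses
 * report DONE with no data.
 */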
286 static uint32_t
287 flash_eerd_read(E1000State *s, int x)
288 {
289 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
290
291 if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
292 return (s->mac_reg[EERD]);
293
294 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
295 return (E1000_EEPROM_RW_REG_DONE | r);
296
297 return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
298 E1000_EEPROM_RW_REG_DONE | r);
299 }
300
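/*
 * Compute an Internet checksum over data[css..cse] (to the end of the
 * packet when cse is 0) and store the 16-bit result at offset sloc.
 */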
301 static void
302 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
303 {
304 uint32_t sum;
305
306 if (cse && cse < n)
307 n = cse + 1;
308 if (sloc < n-1) {
309 sum = net_checksum_add(n-css, data+css);
310 cpu_to_be16wu((uint16_t *)(data + sloc),
311 net_checksum_finish(sum));
312 }
313 }
314
315 static inline int
316 vlan_enabled(E1000State *s)
317 {
318 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
319 }
320
321 static inline int
322 vlan_rx_filter_enabled(E1000State *s)
323 {
324 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
325 }
326
327 static inline int
328 is_vlan_packet(E1000State *s, const uint8_t *buf)
329 {
330 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
331 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
332 }
333
334 static inline int
335 is_vlan_txd(uint32_t txd_lower)
336 {
337 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
338 }
339
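/*
 * Emit one segment of the current packet.  For TSO segments the headers
 * that change per segment are patched first: IPv4 total length and ID (or
 * IPv6 payload length), TCP sequence number and PSH/FIN flags (or UDP
 * length), and the pseudo-header length folded into the TCP/UDP checksum.
 * Any requested checksums are then inserted and the frame is handed to
 * the VLAN client, splicing in the 802.1Q tag when needed.
 */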
340 static void
341 xmit_seg(E1000State *s)
342 {
343 uint16_t len, *sp;
344 unsigned int frames = s->tx.tso_frames, css, sofar, n;
345 struct e1000_tx *tp = &s->tx;
346
347 if (tp->tse && tp->cptse) {
348 css = tp->ipcss;
349 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
350 frames, tp->size, css);
351 if (tp->ip) { // IPv4
352 cpu_to_be16wu((uint16_t *)(tp->data+css+2),
353 tp->size - css);
354 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
355 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
356 } else // IPv6
357 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
358 tp->size - css);
359 css = tp->tucss;
360 len = tp->size - css;
361 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
362 if (tp->tcp) {
363 sofar = frames * tp->mss;
364 cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
365 be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
366 if (tp->paylen - sofar > tp->mss)
367 tp->data[css + 13] &= ~9; // PSH, FIN
368 } else // UDP
369 cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
370 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
371 // add pseudo-header length before checksum calculation
372 sp = (uint16_t *)(tp->data + tp->tucso);
373 cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
374 }
375 tp->tso_frames++;
376 }
377
378 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
379 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
380 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
381 putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
382 if (tp->vlan_needed) {
383 memmove(tp->vlan, tp->data, 12);
384 memcpy(tp->data + 8, tp->vlan_header, 4);
385 qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
386 } else
387 qemu_send_packet(s->vc, tp->data, tp->size);
388 s->mac_reg[TPT]++;
389 s->mac_reg[GPTC]++;
390 n = s->mac_reg[TOTL];
391 if ((s->mac_reg[TOTL] += s->tx.size) < n)
392 s->mac_reg[TOTH]++;
393 }
394
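/*
 * Handle one transmit descriptor.  Context descriptors only latch offload
 * parameters (checksum offsets, MSS, header length, TSE).  Data and
 * legacy descriptors carry payload: with TSO the buffer is accumulated
 * and cut into MSS-sized segments, re-prepending the saved header for
 * each one; otherwise the accumulated data is sent as one frame at EOP.
 */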
395 static void
396 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
397 {
398 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
399 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
400 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
401 unsigned int msh = 0xfffff, hdr = 0;
402 uint64_t addr;
403 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
404 struct e1000_tx *tp = &s->tx;
405
406 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
407 op = le32_to_cpu(xp->cmd_and_length);
408 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
409 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
410 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
411 tp->tucss = xp->upper_setup.tcp_fields.tucss;
412 tp->tucso = xp->upper_setup.tcp_fields.tucso;
413 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
414 tp->paylen = op & 0xfffff;
415 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
416 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
417 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
418 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
419 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
420 tp->tso_frames = 0;
421 if (tp->tucso == 0) { // this is probably wrong
422 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
423 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
424 }
425 return;
426 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
427 // data descriptor
428 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
429 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
430 } else
431 // legacy descriptor
432 tp->cptse = 0;
433
434 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
435 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
436 tp->vlan_needed = 1;
437 cpu_to_be16wu((uint16_t *)(tp->vlan_header),
438 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
439 cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
440 le16_to_cpu(dp->upper.fields.special));
441 }
442
443 addr = le64_to_cpu(dp->buffer_addr);
444 if (tp->tse && tp->cptse) {
445 hdr = tp->hdr_len;
446 msh = hdr + tp->mss;
447 do {
448 bytes = split_size;
449 if (tp->size + bytes > msh)
450 bytes = msh - tp->size;
451 cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
452 if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
453 memmove(tp->header, tp->data, hdr);
454 tp->size = sz;
455 addr += bytes;
456 if (sz == msh) {
457 xmit_seg(s);
458 memmove(tp->data, tp->header, hdr);
459 tp->size = hdr;
460 }
461 } while (split_size -= bytes);
462 } else if (!tp->tse && tp->cptse) {
463 // context descriptor TSE is not set, while data descriptor TSE is set
464         DBGOUT(TXERR, "TCP segmentation error\n");
465 } else {
466 cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
467 tp->size += split_size;
468 }
469
470 if (!(txd_lower & E1000_TXD_CMD_EOP))
471 return;
472 if (!(tp->tse && tp->cptse && tp->size < hdr))
473 xmit_seg(s);
474 tp->tso_frames = 0;
475 tp->sum_needed = 0;
476 tp->vlan_needed = 0;
477 tp->size = 0;
478 tp->cptse = 0;
479 }
480
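/*
 * Write the DD status bit back to guest memory when the descriptor asked
 * for status reporting (RS/RPS); returns the TXDW cause to raise, or 0.
 */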
481 static uint32_t
482 txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
483 {
484 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
485
486 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
487 return 0;
488 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
489 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
490 dp->upper.data = cpu_to_le32(txd_upper);
491 cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
492 (void *)&dp->upper, sizeof(dp->upper));
493 return E1000_ICR_TXDW;
494 }
495
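/*
 * Walk the transmit ring from TDH to TDT, processing and writing back
 * each descriptor, then raise the accumulated interrupt causes.
 */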
496 static void
497 start_xmit(E1000State *s)
498 {
499 target_phys_addr_t base;
500 struct e1000_tx_desc desc;
501 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
502
503 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
504 DBGOUT(TX, "tx disabled\n");
505 return;
506 }
507
508 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
509 base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
510 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
511 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
512
513 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
514 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
515 desc.upper.data);
516
517 process_tx_desc(s, &desc);
518 cause |= txdesc_writeback(base, &desc);
519
520 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
521 s->mac_reg[TDH] = 0;
522 /*
523 * the following could happen only if guest sw assigns
524 * bogus values to TDT/TDLEN.
525 * there's nothing too intelligent we could do about this.
526 */
527 if (s->mac_reg[TDH] == tdh_start) {
528 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
529 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
530 break;
531 }
532 }
533 set_ics(s, 0, cause);
534 }
535
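/*
 * Decide whether an incoming frame is accepted: VLAN-tagged frames are
 * checked against VFTA first, then the unicast/multicast promiscuous
 * bits, broadcast acceptance, the exact receive-address (RA) table and
 * finally the multicast table array (MTA) hash.
 */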
536 static int
537 receive_filter(E1000State *s, const uint8_t *buf, int size)
538 {
539 static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
540 static int mta_shift[] = {4, 3, 2, 0};
541 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
542
543 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
544 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
545 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
546 ((vid >> 5) & 0x7f));
547 if ((vfta & (1 << (vid & 0x1f))) == 0)
548 return 0;
549 }
550
551 if (rctl & E1000_RCTL_UPE) // promiscuous
552 return 1;
553
554 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
555 return 1;
556
557 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
558 return 1;
559
560 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
561 if (!(rp[1] & E1000_RAH_AV))
562 continue;
563 ra[0] = cpu_to_le32(rp[0]);
564 ra[1] = cpu_to_le32(rp[1]);
565 if (!memcmp(buf, (uint8_t *)ra, 6)) {
566 DBGOUT(RXFILTER,
567 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
568 (int)(rp - s->mac_reg - RA)/2,
569 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
570 return 1;
571 }
572 }
573 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
574 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
575
576 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
577 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
578 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
579 return 1;
580 DBGOUT(RXFILTER,
581 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
582 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
583 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
584 s->mac_reg[MTA + (f >> 5)]);
585
586 return 0;
587 }
588
589 static void
590 e1000_set_link_status(VLANClientState *vc)
591 {
592 E1000State *s = vc->opaque;
593 uint32_t old_status = s->mac_reg[STATUS];
594
595 if (vc->link_down)
596 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
597 else
598 s->mac_reg[STATUS] |= E1000_STATUS_LU;
599
600 if (s->mac_reg[STATUS] != old_status)
601 set_ics(s, 0, E1000_ICR_LSC);
602 }
603
604 static int
605 e1000_can_receive(void *opaque)
606 {
607 E1000State *s = opaque;
608
609 return (s->mac_reg[RCTL] & E1000_RCTL_EN);
610 }
611
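/*
 * Deliver a frame into the guest's receive ring: apply the receive
 * filter, strip any VLAN tag into the descriptor's "special" field, copy
 * the data to the guest buffer and advance RDH, then signal RXT0 (plus
 * RXDMT0 when the ring runs low and RXO on overrun).
 */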
612 static void
613 e1000_receive(void *opaque, const uint8_t *buf, int size)
614 {
615 E1000State *s = opaque;
616 struct e1000_rx_desc desc;
617 target_phys_addr_t base;
618 unsigned int n, rdt;
619 uint32_t rdh_start;
620 uint16_t vlan_special = 0;
621 uint8_t vlan_status = 0, vlan_offset = 0;
622
623 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
624 return;
625
626 if (size > s->rxbuf_size) {
627 DBGOUT(RX, "packet too large for buffers (%d > %d)\n", size,
628 s->rxbuf_size);
629 return;
630 }
631
632 if (!receive_filter(s, buf, size))
633 return;
634
635 if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
636 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
637 memmove((void *)(buf + 4), buf, 12);
638 vlan_status = E1000_RXD_STAT_VP;
639 vlan_offset = 4;
640 size -= 4;
641 }
642
643 rdh_start = s->mac_reg[RDH];
644     size += 4; // descriptor length includes the 4-byte FCS the hardware would append
645 do {
646 if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
647 set_ics(s, 0, E1000_ICS_RXO);
648 return;
649 }
650 base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
651 sizeof(desc) * s->mac_reg[RDH];
652 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
653 desc.special = vlan_special;
654 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
655 if (desc.buffer_addr) {
656 cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
657 (void *)(buf + vlan_offset), size);
658 desc.length = cpu_to_le16(size);
659 desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
660         } else // as per Intel docs; skip descriptors with null buf addr
661 DBGOUT(RX, "Null RX descriptor!!\n");
662 cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));
663
664 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
665 s->mac_reg[RDH] = 0;
666 s->check_rxov = 1;
667 /* see comment in start_xmit; same here */
668 if (s->mac_reg[RDH] == rdh_start) {
669 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
670 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
671 set_ics(s, 0, E1000_ICS_RXO);
672 return;
673 }
674 } while (desc.buffer_addr == 0);
675
676 s->mac_reg[GPRC]++;
677 s->mac_reg[TPR]++;
678 n = s->mac_reg[TORL];
679 if ((s->mac_reg[TORL] += size) < n)
680 s->mac_reg[TORH]++;
681
682 n = E1000_ICS_RXT0;
683 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
684 rdt += s->mac_reg[RDLEN] / sizeof(desc);
685 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
686 s->rxbuf_min_shift)
687 n |= E1000_ICS_RXDMT0;
688
689 set_ics(s, 0, n);
690 }
691
692 static uint32_t
693 mac_readreg(E1000State *s, int index)
694 {
695 return s->mac_reg[index];
696 }
697
698 static uint32_t
699 mac_icr_read(E1000State *s, int index)
700 {
701 uint32_t ret = s->mac_reg[ICR];
702
703 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
704 set_interrupt_cause(s, 0, 0);
705 return ret;
706 }
707
708 static uint32_t
709 mac_read_clr4(E1000State *s, int index)
710 {
711 uint32_t ret = s->mac_reg[index];
712
713 s->mac_reg[index] = 0;
714 return ret;
715 }
716
717 static uint32_t
718 mac_read_clr8(E1000State *s, int index)
719 {
720 uint32_t ret = s->mac_reg[index];
721
722 s->mac_reg[index] = 0;
723 s->mac_reg[index-1] = 0;
724 return ret;
725 }
726
727 static void
728 mac_writereg(E1000State *s, int index, uint32_t val)
729 {
730 s->mac_reg[index] = val;
731 }
732
733 static void
734 set_rdt(E1000State *s, int index, uint32_t val)
735 {
736 s->check_rxov = 0;
737 s->mac_reg[index] = val & 0xffff;
738 }
739
740 static void
741 set_16bit(E1000State *s, int index, uint32_t val)
742 {
743 s->mac_reg[index] = val & 0xffff;
744 }
745
746 static void
747 set_dlen(E1000State *s, int index, uint32_t val)
748 {
749 s->mac_reg[index] = val & 0xfff80;
750 }
751
752 static void
753 set_tctl(E1000State *s, int index, uint32_t val)
754 {
755 s->mac_reg[index] = val;
756 s->mac_reg[TDT] &= 0xffff;
757 start_xmit(s);
758 }
759
760 static void
761 set_icr(E1000State *s, int index, uint32_t val)
762 {
763 DBGOUT(INTERRUPT, "set_icr %x\n", val);
764 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
765 }
766
767 static void
768 set_imc(E1000State *s, int index, uint32_t val)
769 {
770 s->mac_reg[IMS] &= ~val;
771 set_ics(s, 0, 0);
772 }
773
774 static void
775 set_ims(E1000State *s, int index, uint32_t val)
776 {
777 s->mac_reg[IMS] |= val;
778 set_ics(s, 0, 0);
779 }
780
781 #define getreg(x) [x] = mac_readreg
782 static uint32_t (*macreg_readops[])(E1000State *, int) = {
783 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
784 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
785 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
786 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
787 getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
788
789 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
790 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
791 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
792 [CRCERRS ... MPC] = &mac_readreg,
793 [RA ... RA+31] = &mac_readreg,
794 [MTA ... MTA+127] = &mac_readreg,
795 [VFTA ... VFTA+127] = &mac_readreg,
796 };
797 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
798
799 #define putreg(x) [x] = mac_writereg
800 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
801 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
802 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
803 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
804 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
805 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
806 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
807 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
808 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
809 [RA ... RA+31] = &mac_writereg,
810 [MTA ... MTA+127] = &mac_writereg,
811 [VFTA ... VFTA+127] = &mac_writereg,
812 };
813 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
814
815 static void
816 e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
817 {
818 E1000State *s = opaque;
819 unsigned int index = (addr & 0x1ffff) >> 2;
820
821 #ifdef TARGET_WORDS_BIGENDIAN
822 val = bswap32(val);
823 #endif
824 if (index < NWRITEOPS && macreg_writeops[index])
825 macreg_writeops[index](s, index, val);
826 else if (index < NREADOPS && macreg_readops[index])
827 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
828 else
829 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
830 index<<2, val);
831 }
832
833 static void
834 e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
835 {
836 // emulate hw without byte enables: no RMW
837 e1000_mmio_writel(opaque, addr & ~3,
838 (val & 0xffff) << (8*(addr & 3)));
839 }
840
841 static void
842 e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
843 {
844 // emulate hw without byte enables: no RMW
845 e1000_mmio_writel(opaque, addr & ~3,
846 (val & 0xff) << (8*(addr & 3)));
847 }
848
849 static uint32_t
850 e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
851 {
852 E1000State *s = opaque;
853 unsigned int index = (addr & 0x1ffff) >> 2;
854
855 if (index < NREADOPS && macreg_readops[index])
856 {
857 uint32_t val = macreg_readops[index](s, index);
858 #ifdef TARGET_WORDS_BIGENDIAN
859 val = bswap32(val);
860 #endif
861 return val;
862 }
863 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
864 return 0;
865 }
866
867 static uint32_t
868 e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
869 {
870 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
871 (8 * (addr & 3))) & 0xff;
872 }
873
874 static uint32_t
875 e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
876 {
877 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
878 (8 * (addr & 3))) & 0xffff;
879 }
880
881 static const int mac_regtosave[] = {
882 CTRL, EECD, EERD, GPRC, GPTC, ICR, ICS, IMC, IMS,
883 LEDCTL, MANC, MDIC, MPC, PBA, RCTL, RDBAH, RDBAL, RDH,
884 RDLEN, RDT, STATUS, SWSM, TCTL, TDBAH, TDBAL, TDH, TDLEN,
885 TDT, TORH, TORL, TOTH, TOTL, TPR, TPT, TXDCTL, WUFC,
886 VET,
887 };
888 enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };
889
890 static const struct {
891 int size;
892 int array0;
893 } mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
894 enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };
895
896 static void
897 nic_save(QEMUFile *f, void *opaque)
898 {
899 E1000State *s = (E1000State *)opaque;
900 int i, j;
901
902 pci_device_save(&s->dev, f);
903 qemu_put_be32(f, 0);
904 qemu_put_be32s(f, &s->rxbuf_size);
905 qemu_put_be32s(f, &s->rxbuf_min_shift);
906 qemu_put_be32s(f, &s->eecd_state.val_in);
907 qemu_put_be16s(f, &s->eecd_state.bitnum_in);
908 qemu_put_be16s(f, &s->eecd_state.bitnum_out);
909 qemu_put_be16s(f, &s->eecd_state.reading);
910 qemu_put_be32s(f, &s->eecd_state.old_eecd);
911 qemu_put_8s(f, &s->tx.ipcss);
912 qemu_put_8s(f, &s->tx.ipcso);
913 qemu_put_be16s(f, &s->tx.ipcse);
914 qemu_put_8s(f, &s->tx.tucss);
915 qemu_put_8s(f, &s->tx.tucso);
916 qemu_put_be16s(f, &s->tx.tucse);
917 qemu_put_be32s(f, &s->tx.paylen);
918 qemu_put_8s(f, &s->tx.hdr_len);
919 qemu_put_be16s(f, &s->tx.mss);
920 qemu_put_be16s(f, &s->tx.size);
921 qemu_put_be16s(f, &s->tx.tso_frames);
922 qemu_put_8s(f, &s->tx.sum_needed);
923 qemu_put_s8s(f, &s->tx.ip);
924 qemu_put_s8s(f, &s->tx.tcp);
925 qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
926 qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
927 for (i = 0; i < 64; i++)
928 qemu_put_be16s(f, s->eeprom_data + i);
929 for (i = 0; i < 0x20; i++)
930 qemu_put_be16s(f, s->phy_reg + i);
931 for (i = 0; i < MAC_NSAVE; i++)
932 qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
933 for (i = 0; i < MAC_NARRAYS; i++)
934 for (j = 0; j < mac_regarraystosave[i].size; j++)
935 qemu_put_be32s(f,
936 s->mac_reg + mac_regarraystosave[i].array0 + j);
937 }
938
939 static int
940 nic_load(QEMUFile *f, void *opaque, int version_id)
941 {
942 E1000State *s = (E1000State *)opaque;
943 int i, j, ret;
944
945 if ((ret = pci_device_load(&s->dev, f)) < 0)
946 return ret;
947 if (version_id == 1)
948 qemu_get_sbe32s(f, &i); /* once some unused instance id */
949 qemu_get_be32(f); /* Ignored. Was mmio_base. */
950 qemu_get_be32s(f, &s->rxbuf_size);
951 qemu_get_be32s(f, &s->rxbuf_min_shift);
952 qemu_get_be32s(f, &s->eecd_state.val_in);
953 qemu_get_be16s(f, &s->eecd_state.bitnum_in);
954 qemu_get_be16s(f, &s->eecd_state.bitnum_out);
955 qemu_get_be16s(f, &s->eecd_state.reading);
956 qemu_get_be32s(f, &s->eecd_state.old_eecd);
957 qemu_get_8s(f, &s->tx.ipcss);
958 qemu_get_8s(f, &s->tx.ipcso);
959 qemu_get_be16s(f, &s->tx.ipcse);
960 qemu_get_8s(f, &s->tx.tucss);
961 qemu_get_8s(f, &s->tx.tucso);
962 qemu_get_be16s(f, &s->tx.tucse);
963 qemu_get_be32s(f, &s->tx.paylen);
964 qemu_get_8s(f, &s->tx.hdr_len);
965 qemu_get_be16s(f, &s->tx.mss);
966 qemu_get_be16s(f, &s->tx.size);
967 qemu_get_be16s(f, &s->tx.tso_frames);
968 qemu_get_8s(f, &s->tx.sum_needed);
969 qemu_get_s8s(f, &s->tx.ip);
970 qemu_get_s8s(f, &s->tx.tcp);
971 qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
972 qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
973 for (i = 0; i < 64; i++)
974 qemu_get_be16s(f, s->eeprom_data + i);
975 for (i = 0; i < 0x20; i++)
976 qemu_get_be16s(f, s->phy_reg + i);
977 for (i = 0; i < MAC_NSAVE; i++)
978 qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
979 for (i = 0; i < MAC_NARRAYS; i++)
980 for (j = 0; j < mac_regarraystosave[i].size; j++)
981 qemu_get_be32s(f,
982 s->mac_reg + mac_regarraystosave[i].array0 + j);
983 return 0;
984 }
985
986 static const uint16_t e1000_eeprom_template[64] = {
987 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
988 0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
989 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
990 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
991 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
992 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
993 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
994 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
995 };
996
997 static const uint16_t phy_reg_init[] = {
998 [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
999 [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
1000 [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
1001 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
1002 [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
1003 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
1004 };
1005
1006 static const uint32_t mac_reg_init[] = {
1007 [PBA] = 0x00100030,
1008 [LEDCTL] = 0x602,
1009 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
1010 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
1011 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
1012 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
1013 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
1014 E1000_STATUS_LU,
1015 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
1016 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
1017 E1000_MANC_RMCP_EN,
1018 };
1019
1020 /* PCI interface */
1021
1022 static CPUWriteMemoryFunc *e1000_mmio_write[] = {
1023 e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
1024 };
1025
1026 static CPUReadMemoryFunc *e1000_mmio_read[] = {
1027 e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
1028 };
1029
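/*
 * Map the MMIO BAR.  Most of the register space is registered as
 * coalesced MMIO; the words holding registers whose writes must take
 * effect immediately (MDIC, ICR/ICS/IMS/IMC, TCTL, TDT) are excluded so
 * that those accesses still trap synchronously.
 */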
1030 static void
1031 e1000_mmio_map(PCIDevice *pci_dev, int region_num,
1032 uint32_t addr, uint32_t size, int type)
1033 {
1034 E1000State *d = (E1000State *)pci_dev;
1035 int i;
1036 const uint32_t excluded_regs[] = {
1037 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1038 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1039 };
1040
1041
1042 DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);
1043
1044 cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
1045 qemu_register_coalesced_mmio(addr, excluded_regs[0]);
1046
1047 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1048 qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
1049 excluded_regs[i + 1] -
1050 excluded_regs[i] - 4);
1051 }
1052
1053 static void
1054 e1000_cleanup(VLANClientState *vc)
1055 {
1056 E1000State *d = vc->opaque;
1057
1058 unregister_savevm("e1000", d);
1059 }
1060
1061 static int
1062 pci_e1000_uninit(PCIDevice *dev)
1063 {
1064 E1000State *d = (E1000State *) dev;
1065
1066 cpu_unregister_io_memory(d->mmio_index);
1067
1068 return 0;
1069 }
1070
1071 PCIDevice *
1072 pci_e1000_init(PCIBus *bus, NICInfo *nd, int devfn)
1073 {
1074 E1000State *d;
1075 uint8_t *pci_conf;
1076 uint16_t checksum = 0;
1077 static const char info_str[] = "e1000";
1078 int i;
1079
1080 d = (E1000State *)pci_register_device(bus, "e1000",
1081 sizeof(E1000State), devfn, NULL, NULL);
1082
1083 if (!d)
1084 return NULL;
1085
1086 pci_conf = d->dev.config;
1087
1088 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
1089 pci_config_set_device_id(pci_conf, E1000_DEVID);
1090 *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
1091 *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
1092 pci_conf[0x08] = 0x03;
1093 pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
1094 pci_conf[0x0c] = 0x10;
1095
1096     pci_conf[0x3d] = 1; // interrupt pin A (INTA#)
1097
1098 d->mmio_index = cpu_register_io_memory(0, e1000_mmio_read,
1099 e1000_mmio_write, d);
1100
1101 pci_register_io_region((PCIDevice *)d, 0, PNPMMIO_SIZE,
1102 PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);
1103
1104 pci_register_io_region((PCIDevice *)d, 1, IOPORT_SIZE,
1105 PCI_ADDRESS_SPACE_IO, ioport_map);
1106
1107 memmove(d->eeprom_data, e1000_eeprom_template,
1108 sizeof e1000_eeprom_template);
1109 for (i = 0; i < 3; i++)
1110 d->eeprom_data[i] = (nd->macaddr[2*i+1]<<8) | nd->macaddr[2*i];
1111 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1112 checksum += d->eeprom_data[i];
1113 checksum = (uint16_t) EEPROM_SUM - checksum;
1114 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1115
1116 memset(d->phy_reg, 0, sizeof d->phy_reg);
1117 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
1118 memset(d->mac_reg, 0, sizeof d->mac_reg);
1119 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
1120 d->rxbuf_min_shift = 1;
1121 memset(&d->tx, 0, sizeof d->tx);
1122
1123 d->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name,
1124 e1000_receive, e1000_can_receive,
1125 e1000_cleanup, d);
1126 d->vc->link_status_changed = e1000_set_link_status;
1127
1128 qemu_format_nic_info_str(d->vc, nd->macaddr);
1129
1130 register_savevm(info_str, -1, 2, nic_save, nic_load, d);
1131 d->dev.unregister = pci_e1000_uninit;
1132
1133 return (PCIDevice *)d;
1134 }