hw/e1000.c
1 /*
2 * QEMU e1000 emulation
3 *
4 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
5 * Copyright (c) 2008 Qumranet
6 * Based on work done by:
7 * Copyright (c) 2007 Dan Aloni
8 * Copyright (c) 2004 Antony T Curtis
9 *
10 * This library is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with this library; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
23 */
24
25
26 #include "hw.h"
27 #include "pci.h"
28 #include "net.h"
29
30 #include "e1000_hw.h"
31
32 #define DEBUG
33
34 #ifdef DEBUG
35 enum {
36 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
37 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
38 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
39 DEBUG_RXFILTER, DEBUG_NOTYET,
40 };
41 #define DBGBIT(x) (1<<DEBUG_##x)
42 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
43
44 #define DBGOUT(what, fmt, ...) do { \
45 if (debugflags & DBGBIT(what)) \
46 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
47 } while (0)
48 #else
49 #define DBGOUT(what, fmt, ...) do {} while (0)
50 #endif
51
52 #define IOPORT_SIZE 0x40
53 #define PNPMMIO_SIZE 0x20000
54
55 /*
56 * HW models:
57 * E1000_DEV_ID_82540EM works with Windows and Linux
58 * E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
59 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
60 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
61 * Others never tested
62 */
63 enum { E1000_DEVID = E1000_DEV_ID_82540EM };
64
65 /*
66 * May need to specify additional MAC-to-PHY entries --
67 * Intel's Windows driver refuses to initialize unless they match
68 */
69 enum {
70 PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
71 E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
72 /* default to E1000_DEV_ID_82540EM */ 0xc20
73 };
74
75 typedef struct E1000State_st {
76 PCIDevice dev;
77 VLANClientState *vc;
78 int mmio_index;
79
80 uint32_t mac_reg[0x8000];
81 uint16_t phy_reg[0x20];
82 uint16_t eeprom_data[64];
83
84 uint32_t rxbuf_size;
85 uint32_t rxbuf_min_shift;
86 int check_rxov;
87 struct e1000_tx {
88 unsigned char header[256];
89 unsigned char vlan_header[4];
90 unsigned char vlan[4];
91 unsigned char data[0x10000];
92 uint16_t size;
93 unsigned char sum_needed;
94 unsigned char vlan_needed;
95 uint8_t ipcss;
96 uint8_t ipcso;
97 uint16_t ipcse;
98 uint8_t tucss;
99 uint8_t tucso;
100 uint16_t tucse;
101 uint8_t hdr_len;
102 uint16_t mss;
103 uint32_t paylen;
104 uint16_t tso_frames;
105 char tse;
106 int8_t ip;
107 int8_t tcp;
108 char cptse; // current packet tse bit
109 } tx;
110
111 struct {
112 uint32_t val_in; // shifted in from guest driver
113 uint16_t bitnum_in;
114 uint16_t bitnum_out;
115 uint16_t reading;
116 uint32_t old_eecd;
117 } eecd_state;
118 } E1000State;
119
120 #define defreg(x) x = (E1000_##x>>2)
121 enum {
122 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
123 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
124 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
125 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
126 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
127 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
128 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
129 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
130 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
131 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
132 defreg(VET),
133 };
134
135 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
136 static const char phy_regcap[0x20] = {
137 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
138 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
139 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
140 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
141 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
142 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
143 };
144
145 static void
146 ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
147 uint32_t size, int type)
148 {
149 DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
150 }
151
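/*
 * Interrupt model: ICR accumulates the pending cause bits and INT_ASSERTED
 * is set whenever any cause is pending; the PCI interrupt line is driven
 * high exactly when (IMS & ICR) != 0.  set_ics() ORs new causes into ICR,
 * so a masked cause stays pending until the guest unmasks it via IMS.
 */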
152 static void
153 set_interrupt_cause(E1000State *s, int index, uint32_t val)
154 {
155 if (val)
156 val |= E1000_ICR_INT_ASSERTED;
157 s->mac_reg[ICR] = val;
158 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
159 }
160
161 static void
162 set_ics(E1000State *s, int index, uint32_t val)
163 {
164 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
165 s->mac_reg[IMS]);
166 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
167 }
168
169 static int
170 rxbufsize(uint32_t v)
171 {
172 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
173 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
174 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
175 switch (v) {
176 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
177 return 16384;
178 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
179 return 8192;
180 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
181 return 4096;
182 case E1000_RCTL_SZ_1024:
183 return 1024;
184 case E1000_RCTL_SZ_512:
185 return 512;
186 case E1000_RCTL_SZ_256:
187 return 256;
188 }
189 return 2048;
190 }
191
192 static void
193 set_ctrl(E1000State *s, int index, uint32_t val)
194 {
195 /* RST is self clearing */
196 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
197 }
198
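/*
 * Writing RCTL also caches the receive buffer size selected by the BSEX/SZ
 * bits and derives rxbuf_min_shift from the RDMTS field, so that
 * RDLEN >> rxbuf_min_shift is the 1/2, 1/4 or 1/8 free-descriptor threshold
 * used to raise RXDMT0 in e1000_receive().
 */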
199 static void
200 set_rx_control(E1000State *s, int index, uint32_t val)
201 {
202 s->mac_reg[RCTL] = val;
203 s->rxbuf_size = rxbufsize(val);
204 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
205 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
206 s->mac_reg[RCTL]);
207 }
208
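/*
 * MDIC emulates the MDI/MDIO control register used to access the PHY: only
 * PHY address 1 is modelled, reads and writes are validated against
 * phy_regcap[], and completion is signalled by setting MDIC_READY and
 * raising the MDAC interrupt cause.
 */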
209 static void
210 set_mdic(E1000State *s, int index, uint32_t val)
211 {
212 uint32_t data = val & E1000_MDIC_DATA_MASK;
213 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
214
215 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
216 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
217 else if (val & E1000_MDIC_OP_READ) {
218 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
219 if (!(phy_regcap[addr] & PHY_R)) {
220 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
221 val |= E1000_MDIC_ERROR;
222 } else
223 val = (val ^ data) | s->phy_reg[addr];
224 } else if (val & E1000_MDIC_OP_WRITE) {
225 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
226 if (!(phy_regcap[addr] & PHY_W)) {
227 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
228 val |= E1000_MDIC_ERROR;
229 } else
230 s->phy_reg[addr] = data;
231 }
232 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
233 set_ics(s, 0, E1000_ICR_MDAC);
234 }
235
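/*
 * EECD models the bit-banged Microwire EEPROM interface.  The guest toggles
 * SK and drives DI; after nine bits have been shifted in (start bit, 2-bit
 * opcode, 6-bit word address) a read opcode selects the addressed 16-bit
 * word of eeprom_data, which is then shifted out MSB-first on DO, advancing
 * one bit per SK falling edge.  A rising SK edge with CS deasserted resets
 * the state machine.
 */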
236 static uint32_t
237 get_eecd(E1000State *s, int index)
238 {
239 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
240
241 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
242 s->eecd_state.bitnum_out, s->eecd_state.reading);
243 if (!s->eecd_state.reading ||
244 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
245 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
246 ret |= E1000_EECD_DO;
247 return ret;
248 }
249
250 static void
251 set_eecd(E1000State *s, int index, uint32_t val)
252 {
253 uint32_t oldval = s->eecd_state.old_eecd;
254
255 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
256 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
257 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
258 return;
259 if (!(E1000_EECD_SK & val)) { // falling edge
260 s->eecd_state.bitnum_out++;
261 return;
262 }
263 if (!(val & E1000_EECD_CS)) { // rising, no CS (EEPROM reset)
264 memset(&s->eecd_state, 0, sizeof s->eecd_state);
265 return;
266 }
267 s->eecd_state.val_in <<= 1;
268 if (val & E1000_EECD_DI)
269 s->eecd_state.val_in |= 1;
270 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
271 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
272 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
273 EEPROM_READ_OPCODE_MICROWIRE);
274 }
275 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
276 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
277 s->eecd_state.reading);
278 }
279
280 static uint32_t
281 flash_eerd_read(E1000State *s, int x)
282 {
283 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
284
285 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
286 return 0;
287 return (s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
288 E1000_EEPROM_RW_REG_DONE | r;
289 }
290
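/*
 * Insert a 16-bit Internet checksum into the packet: sum data[css..n-1]
 * (n is clipped to cse + 1 when cse is non-zero; cse == 0 means "to the end
 * of the packet") and store the result big-endian at offset sloc.
 */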
291 static void
292 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
293 {
294 uint32_t sum;
295
296 if (cse && cse < n)
297 n = cse + 1;
298 if (sloc < n-1) {
299 sum = net_checksum_add(n-css, data+css);
300 cpu_to_be16wu((uint16_t *)(data + sloc),
301 net_checksum_finish(sum));
302 }
303 }
304
305 static inline int
306 vlan_enabled(E1000State *s)
307 {
308 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
309 }
310
311 static inline int
312 vlan_rx_filter_enabled(E1000State *s)
313 {
314 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
315 }
316
317 static inline int
318 is_vlan_packet(E1000State *s, const uint8_t *buf)
319 {
320 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
321 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
322 }
323
324 static inline int
325 is_vlan_txd(uint32_t txd_lower)
326 {
327 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
328 }
329
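/*
 * Transmit one segment.  When TSO is active (the context descriptor's TSE
 * and the current packet's TSE are both set), the headers of each mss-sized
 * segment are patched first: the IPv4 total length (or IPv6 payload length)
 * is rewritten, the IPv4 identification field is advanced by the segment
 * count, the TCP sequence number is advanced by frames * mss, PSH and FIN
 * are cleared on all but the final segment, and the pseudo-header length is
 * folded into the checksum seed.  Requested IP/TCP/UDP checksums are then
 * inserted with putsum() and the frame, with the 802.1Q tag inserted after
 * the MAC addresses if requested, is handed to qemu_send_packet().
 */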
330 static void
331 xmit_seg(E1000State *s)
332 {
333 uint16_t len, *sp;
334 unsigned int frames = s->tx.tso_frames, css, sofar, n;
335 struct e1000_tx *tp = &s->tx;
336
337 if (tp->tse && tp->cptse) {
338 css = tp->ipcss;
339 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
340 frames, tp->size, css);
341 if (tp->ip) { // IPv4
342 cpu_to_be16wu((uint16_t *)(tp->data+css+2),
343 tp->size - css);
344 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
345 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
346 } else // IPv6
347 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
348 tp->size - css);
349 css = tp->tucss;
350 len = tp->size - css;
351 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
352 if (tp->tcp) {
353 sofar = frames * tp->mss;
354 cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
355 be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
356 if (tp->paylen - sofar > tp->mss)
357 tp->data[css + 13] &= ~9; // PSH, FIN
358 } else // UDP
359 cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
360 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
361 // add pseudo-header length before checksum calculation
362 sp = (uint16_t *)(tp->data + tp->tucso);
363 cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
364 }
365 tp->tso_frames++;
366 }
367
368 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
369 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
370 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
371 putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
372 if (tp->vlan_needed) {
373 memmove(tp->vlan, tp->data, 12);
374 memcpy(tp->data + 8, tp->vlan_header, 4);
375 qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
376 } else
377 qemu_send_packet(s->vc, tp->data, tp->size);
378 s->mac_reg[TPT]++;
379 s->mac_reg[GPTC]++;
380 n = s->mac_reg[TOTL];
381 if ((s->mac_reg[TOTL] += s->tx.size) < n)
382 s->mac_reg[TOTH]++;
383 }
384
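/*
 * Process one transmit descriptor.  A context descriptor (DEXT set, DTYP_D
 * clear) only latches the offload parameters into s->tx; a data descriptor
 * (DEXT | DTYP_D) carries payload, with per-packet TSE kept in cptse;
 * anything else is treated as a legacy descriptor.  With TSO active, data
 * is accumulated into tp->data until hdr_len + mss bytes are buffered, the
 * segment is sent with xmit_seg(), and the saved header is copied back in
 * front of the next segment's payload.  The final (possibly short) segment
 * is sent when EOP is seen.
 */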
385 static void
386 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
387 {
388 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
389 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
390 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
391 unsigned int msh = 0xfffff, hdr = 0;
392 uint64_t addr;
393 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
394 struct e1000_tx *tp = &s->tx;
395
396 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
397 op = le32_to_cpu(xp->cmd_and_length);
398 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
399 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
400 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
401 tp->tucss = xp->upper_setup.tcp_fields.tucss;
402 tp->tucso = xp->upper_setup.tcp_fields.tucso;
403 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
404 tp->paylen = op & 0xfffff;
405 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
406 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
407 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
408 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
409 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
410 tp->tso_frames = 0;
411 if (tp->tucso == 0) { // this is probably wrong
412 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
413 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
414 }
415 return;
416 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
417 // data descriptor
418 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
419 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
420 } else
421 // legacy descriptor
422 tp->cptse = 0;
423
424 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
425 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
426 tp->vlan_needed = 1;
427 cpu_to_be16wu((uint16_t *)(tp->vlan_header),
428 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
429 cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
430 le16_to_cpu(dp->upper.fields.special));
431 }
432
433 addr = le64_to_cpu(dp->buffer_addr);
434 if (tp->tse && tp->cptse) {
435 hdr = tp->hdr_len;
436 msh = hdr + tp->mss;
437 do {
438 bytes = split_size;
439 if (tp->size + bytes > msh)
440 bytes = msh - tp->size;
441 cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
442 if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
443 memmove(tp->header, tp->data, hdr);
444 tp->size = sz;
445 addr += bytes;
446 if (sz == msh) {
447 xmit_seg(s);
448 memmove(tp->data, tp->header, hdr);
449 tp->size = hdr;
450 }
451 } while (split_size -= bytes);
452 } else if (!tp->tse && tp->cptse) {
453 // context descriptor TSE is not set, while data descriptor TSE is set
454 DBGOUT(TXERR, "TCP segmentation error\n");
455 } else {
456 cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
457 tp->size += split_size;
458 }
459
460 if (!(txd_lower & E1000_TXD_CMD_EOP))
461 return;
462 if (!(tp->tse && tp->cptse && tp->size < hdr))
463 xmit_seg(s);
464 tp->tso_frames = 0;
465 tp->sum_needed = 0;
466 tp->vlan_needed = 0;
467 tp->size = 0;
468 tp->cptse = 0;
469 }
470
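/*
 * Write the DD (descriptor done) status back to guest memory, but only if
 * the descriptor requested it via RS/RPS; returns the TXDW cause bit for
 * the caller to accumulate.
 */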
471 static uint32_t
472 txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
473 {
474 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
475
476 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
477 return 0;
478 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
479 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
480 dp->upper.data = cpu_to_le32(txd_upper);
481 cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
482 (void *)&dp->upper, sizeof(dp->upper));
483 return E1000_ICR_TXDW;
484 }
485
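/*
 * Walk the transmit ring from TDH towards TDT, reading each descriptor from
 * TDBAH:TDBAL + TDH * sizeof(desc), processing it and writing back its
 * status.  The accumulated interrupt causes are raised once at the end.
 */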
486 static void
487 start_xmit(E1000State *s)
488 {
489 target_phys_addr_t base;
490 struct e1000_tx_desc desc;
491 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
492
493 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
494 DBGOUT(TX, "tx disabled\n");
495 return;
496 }
497
498 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
499 base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
500 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
501 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
502
503 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
504 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
505 desc.upper.data);
506
507 process_tx_desc(s, &desc);
508 cause |= txdesc_writeback(base, &desc);
509
510 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
511 s->mac_reg[TDH] = 0;
512 /*
513 * the following could happen only if guest sw assigns
514 * bogus values to TDT/TDLEN.
515 * there's nothing too intelligent we could do about this.
516 */
517 if (s->mac_reg[TDH] == tdh_start) {
518 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
519 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
520 break;
521 }
522 }
523 set_ics(s, 0, cause);
524 }
525
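/*
 * Receive filtering, roughly in hardware order: drop VLAN-tagged frames
 * whose VID is not set in VFTA (when VLAN filtering is on), then accept on
 * unicast promiscuous (UPE), multicast promiscuous (MPE), broadcast (BAM),
 * an exact match against one of the 16 RAL/RAH address pairs, or a hit in
 * the 4096-bit multicast table.  The MTA hash is a 12-bit slice of the last
 * two destination-address bytes, e.g. with MO = 0 a destination ending in
 * ...:ab:cd gives (0xcdab >> 4) & 0xfff = 0xcda, i.e. bit 26 of MTA[102].
 */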
526 static int
527 receive_filter(E1000State *s, const uint8_t *buf, int size)
528 {
529 static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
530 static int mta_shift[] = {4, 3, 2, 0};
531 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
532
533 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
534 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
535 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
536 ((vid >> 5) & 0x7f));
537 if ((vfta & (1 << (vid & 0x1f))) == 0)
538 return 0;
539 }
540
541 if (rctl & E1000_RCTL_UPE) // promiscuous
542 return 1;
543
544 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
545 return 1;
546
547 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
548 return 1;
549
550 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
551 if (!(rp[1] & E1000_RAH_AV))
552 continue;
553 ra[0] = cpu_to_le32(rp[0]);
554 ra[1] = cpu_to_le32(rp[1]);
555 if (!memcmp(buf, (uint8_t *)ra, 6)) {
556 DBGOUT(RXFILTER,
557 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
558 (int)(rp - s->mac_reg - RA)/2,
559 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
560 return 1;
561 }
562 }
563 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
564 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
565
566 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
567 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
568 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
569 return 1;
570 DBGOUT(RXFILTER,
571 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
572 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
573 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
574 s->mac_reg[MTA + (f >> 5)]);
575
576 return 0;
577 }
578
579 static void
580 e1000_set_link_status(VLANClientState *vc)
581 {
582 E1000State *s = vc->opaque;
583 uint32_t old_status = s->mac_reg[STATUS];
584
585 if (vc->link_down)
586 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
587 else
588 s->mac_reg[STATUS] |= E1000_STATUS_LU;
589
590 if (s->mac_reg[STATUS] != old_status)
591 set_ics(s, 0, E1000_ICR_LSC);
592 }
593
594 static int
595 e1000_can_receive(VLANClientState *vc)
596 {
597 E1000State *s = vc->opaque;
598
599 return (s->mac_reg[RCTL] & E1000_RCTL_EN);
600 }
601
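/*
 * Receive path: frames larger than the RCTL-selected buffer size are
 * rejected and filtered frames are silently consumed.  If VLAN stripping is
 * enabled the 802.1Q tag is removed (the MAC addresses are shifted over it)
 * and reported through the descriptor's "special" field.  The data is then
 * copied into the guest buffers described by the RDH..RDT ring; running out
 * of descriptors raises RXO, and RXT0 (plus RXDMT0 when the ring drops
 * below the RDMTS threshold) is raised on completion.
 */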
602 static ssize_t
603 e1000_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
604 {
605 E1000State *s = vc->opaque;
606 struct e1000_rx_desc desc;
607 target_phys_addr_t base;
608 unsigned int n, rdt;
609 uint32_t rdh_start;
610 uint16_t vlan_special = 0;
611 uint8_t vlan_status = 0, vlan_offset = 0;
612
613 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
614 return -1;
615
616 if (size > s->rxbuf_size) {
617 DBGOUT(RX, "packet too large for buffers (%lu > %d)\n",
618 (unsigned long)size, s->rxbuf_size);
619 return -1;
620 }
621
622 if (!receive_filter(s, buf, size))
623 return size;
624
625 if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
626 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
627 memmove((void *)(buf + 4), buf, 12);
628 vlan_status = E1000_RXD_STAT_VP;
629 vlan_offset = 4;
630 size -= 4;
631 }
632
633 rdh_start = s->mac_reg[RDH];
634 size += 4; // for the header
635 do {
636 if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
637 set_ics(s, 0, E1000_ICS_RXO);
638 return -1;
639 }
640 base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
641 sizeof(desc) * s->mac_reg[RDH];
642 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
643 desc.special = vlan_special;
644 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
645 if (desc.buffer_addr) {
646 cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
647 (void *)(buf + vlan_offset), size);
648 desc.length = cpu_to_le16(size);
649 desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
650 } else // as per intel docs; skip descriptors with null buf addr
651 DBGOUT(RX, "Null RX descriptor!!\n");
652 cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));
653
654 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
655 s->mac_reg[RDH] = 0;
656 s->check_rxov = 1;
657 /* see comment in start_xmit; same here */
658 if (s->mac_reg[RDH] == rdh_start) {
659 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
660 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
661 set_ics(s, 0, E1000_ICS_RXO);
662 return -1;
663 }
664 } while (desc.buffer_addr == 0);
665
666 s->mac_reg[GPRC]++;
667 s->mac_reg[TPR]++;
668 n = s->mac_reg[TORL];
669 if ((s->mac_reg[TORL] += size) < n)
670 s->mac_reg[TORH]++;
671
672 n = E1000_ICS_RXT0;
673 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
674 rdt += s->mac_reg[RDLEN] / sizeof(desc);
675 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
676 s->rxbuf_min_shift)
677 n |= E1000_ICS_RXDMT0;
678
679 set_ics(s, 0, n);
680
681 return size;
682 }
683
684 static uint32_t
685 mac_readreg(E1000State *s, int index)
686 {
687 return s->mac_reg[index];
688 }
689
690 static uint32_t
691 mac_icr_read(E1000State *s, int index)
692 {
693 uint32_t ret = s->mac_reg[ICR];
694
695 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
696 set_interrupt_cause(s, 0, 0);
697 return ret;
698 }
699
700 static uint32_t
701 mac_read_clr4(E1000State *s, int index)
702 {
703 uint32_t ret = s->mac_reg[index];
704
705 s->mac_reg[index] = 0;
706 return ret;
707 }
708
709 static uint32_t
710 mac_read_clr8(E1000State *s, int index)
711 {
712 uint32_t ret = s->mac_reg[index];
713
714 s->mac_reg[index] = 0;
715 s->mac_reg[index-1] = 0;
716 return ret;
717 }
718
719 static void
720 mac_writereg(E1000State *s, int index, uint32_t val)
721 {
722 s->mac_reg[index] = val;
723 }
724
725 static void
726 set_rdt(E1000State *s, int index, uint32_t val)
727 {
728 s->check_rxov = 0;
729 s->mac_reg[index] = val & 0xffff;
730 }
731
732 static void
733 set_16bit(E1000State *s, int index, uint32_t val)
734 {
735 s->mac_reg[index] = val & 0xffff;
736 }
737
738 static void
739 set_dlen(E1000State *s, int index, uint32_t val)
740 {
741 s->mac_reg[index] = val & 0xfff80;
742 }
743
744 static void
745 set_tctl(E1000State *s, int index, uint32_t val)
746 {
747 s->mac_reg[index] = val;
748 s->mac_reg[TDT] &= 0xffff;
749 start_xmit(s);
750 }
751
752 static void
753 set_icr(E1000State *s, int index, uint32_t val)
754 {
755 DBGOUT(INTERRUPT, "set_icr %x\n", val);
756 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
757 }
758
759 static void
760 set_imc(E1000State *s, int index, uint32_t val)
761 {
762 s->mac_reg[IMS] &= ~val;
763 set_ics(s, 0, 0);
764 }
765
766 static void
767 set_ims(E1000State *s, int index, uint32_t val)
768 {
769 s->mac_reg[IMS] |= val;
770 set_ics(s, 0, 0);
771 }
772
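/*
 * Register dispatch: MMIO accesses index these tables by the register
 * offset >> 2 (the same encoding defreg() uses for mac_reg[]); the ranged
 * designated initializers cover the RA, MTA and VFTA arrays.  A register
 * present only in the read table is treated as read-only by
 * e1000_mmio_writel().
 */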
773 #define getreg(x) [x] = mac_readreg
774 static uint32_t (*macreg_readops[])(E1000State *, int) = {
775 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
776 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
777 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
778 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
779 getreg(RDH), getreg(RDT), getreg(VET),
780
781 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
782 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
783 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
784 [CRCERRS ... MPC] = &mac_readreg,
785 [RA ... RA+31] = &mac_readreg,
786 [MTA ... MTA+127] = &mac_readreg,
787 [VFTA ... VFTA+127] = &mac_readreg,
788 };
789 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
790
791 #define putreg(x) [x] = mac_writereg
792 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
793 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
794 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
795 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
796 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
797 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
798 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
799 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
800 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
801 [RA ... RA+31] = &mac_writereg,
802 [MTA ... MTA+127] = &mac_writereg,
803 [VFTA ... VFTA+127] = &mac_writereg,
804 };
805 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
806
807 static void
808 e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
809 {
810 E1000State *s = opaque;
811 unsigned int index = (addr & 0x1ffff) >> 2;
812
813 #ifdef TARGET_WORDS_BIGENDIAN
814 val = bswap32(val);
815 #endif
816 if (index < NWRITEOPS && macreg_writeops[index])
817 macreg_writeops[index](s, index, val);
818 else if (index < NREADOPS && macreg_readops[index])
819 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
820 else
821 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
822 index<<2, val);
823 }
824
825 static void
826 e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
827 {
828 // emulate hw without byte enables: no RMW
829 e1000_mmio_writel(opaque, addr & ~3,
830 (val & 0xffff) << (8*(addr & 3)));
831 }
832
833 static void
834 e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
835 {
836 // emulate hw without byte enables: no RMW
837 e1000_mmio_writel(opaque, addr & ~3,
838 (val & 0xff) << (8*(addr & 3)));
839 }
840
841 static uint32_t
842 e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
843 {
844 E1000State *s = opaque;
845 unsigned int index = (addr & 0x1ffff) >> 2;
846
847 if (index < NREADOPS && macreg_readops[index])
848 {
849 uint32_t val = macreg_readops[index](s, index);
850 #ifdef TARGET_WORDS_BIGENDIAN
851 val = bswap32(val);
852 #endif
853 return val;
854 }
855 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
856 return 0;
857 }
858
859 static uint32_t
860 e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
861 {
862 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
863 (8 * (addr & 3))) & 0xff;
864 }
865
866 static uint32_t
867 e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
868 {
869 return ((e1000_mmio_readl(opaque, addr & ~3)) >>
870 (8 * (addr & 3))) & 0xffff;
871 }
872
873 static const int mac_regtosave[] = {
874 CTRL, EECD, EERD, GPRC, GPTC, ICR, ICS, IMC, IMS,
875 LEDCTL, MANC, MDIC, MPC, PBA, RCTL, RDBAH, RDBAL, RDH,
876 RDLEN, RDT, STATUS, SWSM, TCTL, TDBAH, TDBAL, TDH, TDLEN,
877 TDT, TORH, TORL, TOTH, TOTL, TPR, TPT, TXDCTL, WUFC,
878 VET,
879 };
880 enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };
881
882 static const struct {
883 int size;
884 int array0;
885 } mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
886 enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };
887
888 static void
889 nic_save(QEMUFile *f, void *opaque)
890 {
891 E1000State *s = (E1000State *)opaque;
892 int i, j;
893
894 pci_device_save(&s->dev, f);
895 qemu_put_be32(f, 0);
896 qemu_put_be32s(f, &s->rxbuf_size);
897 qemu_put_be32s(f, &s->rxbuf_min_shift);
898 qemu_put_be32s(f, &s->eecd_state.val_in);
899 qemu_put_be16s(f, &s->eecd_state.bitnum_in);
900 qemu_put_be16s(f, &s->eecd_state.bitnum_out);
901 qemu_put_be16s(f, &s->eecd_state.reading);
902 qemu_put_be32s(f, &s->eecd_state.old_eecd);
903 qemu_put_8s(f, &s->tx.ipcss);
904 qemu_put_8s(f, &s->tx.ipcso);
905 qemu_put_be16s(f, &s->tx.ipcse);
906 qemu_put_8s(f, &s->tx.tucss);
907 qemu_put_8s(f, &s->tx.tucso);
908 qemu_put_be16s(f, &s->tx.tucse);
909 qemu_put_be32s(f, &s->tx.paylen);
910 qemu_put_8s(f, &s->tx.hdr_len);
911 qemu_put_be16s(f, &s->tx.mss);
912 qemu_put_be16s(f, &s->tx.size);
913 qemu_put_be16s(f, &s->tx.tso_frames);
914 qemu_put_8s(f, &s->tx.sum_needed);
915 qemu_put_s8s(f, &s->tx.ip);
916 qemu_put_s8s(f, &s->tx.tcp);
917 qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
918 qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
919 for (i = 0; i < 64; i++)
920 qemu_put_be16s(f, s->eeprom_data + i);
921 for (i = 0; i < 0x20; i++)
922 qemu_put_be16s(f, s->phy_reg + i);
923 for (i = 0; i < MAC_NSAVE; i++)
924 qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
925 for (i = 0; i < MAC_NARRAYS; i++)
926 for (j = 0; j < mac_regarraystosave[i].size; j++)
927 qemu_put_be32s(f,
928 s->mac_reg + mac_regarraystosave[i].array0 + j);
929 }
930
931 static int
932 nic_load(QEMUFile *f, void *opaque, int version_id)
933 {
934 E1000State *s = (E1000State *)opaque;
935 int i, j, ret;
936
937 if ((ret = pci_device_load(&s->dev, f)) < 0)
938 return ret;
939 if (version_id == 1)
940 qemu_get_sbe32s(f, &i); /* was once an unused instance id */
941 qemu_get_be32(f); /* Ignored. Was mmio_base. */
942 qemu_get_be32s(f, &s->rxbuf_size);
943 qemu_get_be32s(f, &s->rxbuf_min_shift);
944 qemu_get_be32s(f, &s->eecd_state.val_in);
945 qemu_get_be16s(f, &s->eecd_state.bitnum_in);
946 qemu_get_be16s(f, &s->eecd_state.bitnum_out);
947 qemu_get_be16s(f, &s->eecd_state.reading);
948 qemu_get_be32s(f, &s->eecd_state.old_eecd);
949 qemu_get_8s(f, &s->tx.ipcss);
950 qemu_get_8s(f, &s->tx.ipcso);
951 qemu_get_be16s(f, &s->tx.ipcse);
952 qemu_get_8s(f, &s->tx.tucss);
953 qemu_get_8s(f, &s->tx.tucso);
954 qemu_get_be16s(f, &s->tx.tucse);
955 qemu_get_be32s(f, &s->tx.paylen);
956 qemu_get_8s(f, &s->tx.hdr_len);
957 qemu_get_be16s(f, &s->tx.mss);
958 qemu_get_be16s(f, &s->tx.size);
959 qemu_get_be16s(f, &s->tx.tso_frames);
960 qemu_get_8s(f, &s->tx.sum_needed);
961 qemu_get_s8s(f, &s->tx.ip);
962 qemu_get_s8s(f, &s->tx.tcp);
963 qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
964 qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
965 for (i = 0; i < 64; i++)
966 qemu_get_be16s(f, s->eeprom_data + i);
967 for (i = 0; i < 0x20; i++)
968 qemu_get_be16s(f, s->phy_reg + i);
969 for (i = 0; i < MAC_NSAVE; i++)
970 qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
971 for (i = 0; i < MAC_NARRAYS; i++)
972 for (j = 0; j < mac_regarraystosave[i].size; j++)
973 qemu_get_be32s(f,
974 s->mac_reg + mac_regarraystosave[i].array0 + j);
975 return 0;
976 }
977
978 static const uint16_t e1000_eeprom_template[64] = {
979 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
980 0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
981 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
982 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
983 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
984 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
985 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
986 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
987 };
988
989 static const uint16_t phy_reg_init[] = {
990 [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
991 [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
992 [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
993 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
994 [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
995 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
996 };
997
998 static const uint32_t mac_reg_init[] = {
999 [PBA] = 0x00100030,
1000 [LEDCTL] = 0x602,
1001 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
1002 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
1003 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
1004 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
1005 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
1006 E1000_STATUS_LU,
1007 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
1008 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
1009 E1000_MANC_RMCP_EN,
1010 };
1011
1012 /* PCI interface */
1013
1014 static CPUWriteMemoryFunc *e1000_mmio_write[] = {
1015 e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
1016 };
1017
1018 static CPUReadMemoryFunc *e1000_mmio_read[] = {
1019 e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
1020 };
1021
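/*
 * Map the MMIO BAR and register it for coalesced MMIO, leaving holes for
 * the registers in excluded_regs (interrupt control, MDIC and the transmit
 * doorbell TCTL/TDT): writes to those have immediate side effects and must
 * not be batched.  The list is terminated by PNPMMIO_SIZE so the loop also
 * covers the range after the last excluded register.
 */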
1022 static void
1023 e1000_mmio_map(PCIDevice *pci_dev, int region_num,
1024 uint32_t addr, uint32_t size, int type)
1025 {
1026 E1000State *d = (E1000State *)pci_dev;
1027 int i;
1028 const uint32_t excluded_regs[] = {
1029 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1030 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1031 };
1032
1033
1034 DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);
1035
1036 cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
1037 qemu_register_coalesced_mmio(addr, excluded_regs[0]);
1038
1039 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1040 qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
1041 excluded_regs[i + 1] -
1042 excluded_regs[i] - 4);
1043 }
1044
1045 static void
1046 e1000_cleanup(VLANClientState *vc)
1047 {
1048 E1000State *d = vc->opaque;
1049
1050 unregister_savevm("e1000", d);
1051 }
1052
1053 static int
1054 pci_e1000_uninit(PCIDevice *dev)
1055 {
1056 E1000State *d = (E1000State *) dev;
1057
1058 cpu_unregister_io_memory(d->mmio_index);
1059
1060 return 0;
1061 }
1062
1063 static void e1000_reset(void *opaque)
1064 {
1065 E1000State *d = opaque;
1066
1067 memset(d->phy_reg, 0, sizeof d->phy_reg);
1068 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
1069 memset(d->mac_reg, 0, sizeof d->mac_reg);
1070 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
1071 d->rxbuf_min_shift = 1;
1072 memset(&d->tx, 0, sizeof d->tx);
1073 }
1074
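/*
 * Device init: set up PCI config space (Intel vendor/device ID, class code,
 * INTA), register the MMIO and I/O BARs, and populate the EEPROM from the
 * template with the qdev-provided MAC address in words 0-2.  The checksum
 * word is set so that the 16-bit sum of words 0..EEPROM_CHECKSUM_REG equals
 * EEPROM_SUM, which guest drivers verify before trusting the NVM contents.
 */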
1075 static void pci_e1000_init(PCIDevice *pci_dev)
1076 {
1077 E1000State *d = (E1000State *)pci_dev;
1078 uint8_t *pci_conf;
1079 uint16_t checksum = 0;
1080 static const char info_str[] = "e1000";
1081 int i;
1082 uint8_t macaddr[6];
1083
1084 pci_conf = d->dev.config;
1085
1086 pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
1087 pci_config_set_device_id(pci_conf, E1000_DEVID);
1088 *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
1089 *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
1090 pci_conf[0x08] = 0x03;
1091 pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
1092 pci_conf[0x0c] = 0x10;
1093
1094 pci_conf[0x3d] = 1; // interrupt pin A (INTA#)
1095
1096 d->mmio_index = cpu_register_io_memory(e1000_mmio_read,
1097 e1000_mmio_write, d);
1098
1099 pci_register_bar((PCIDevice *)d, 0, PNPMMIO_SIZE,
1100 PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);
1101
1102 pci_register_bar((PCIDevice *)d, 1, IOPORT_SIZE,
1103 PCI_ADDRESS_SPACE_IO, ioport_map);
1104
1105 memmove(d->eeprom_data, e1000_eeprom_template,
1106 sizeof e1000_eeprom_template);
1107 qdev_get_macaddr(&d->dev.qdev, macaddr);
1108 for (i = 0; i < 3; i++)
1109 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1110 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1111 checksum += d->eeprom_data[i];
1112 checksum = (uint16_t) EEPROM_SUM - checksum;
1113 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1114
1115 d->vc = qdev_get_vlan_client(&d->dev.qdev,
1116 e1000_can_receive, e1000_receive,
1117 NULL, e1000_cleanup, d);
1118 d->vc->link_status_changed = e1000_set_link_status;
1119
1120 qemu_format_nic_info_str(d->vc, macaddr);
1121
1122 register_savevm(info_str, -1, 2, nic_save, nic_load, d);
1123 d->dev.unregister = pci_e1000_uninit;
1124 qemu_register_reset(e1000_reset, d);
1125 e1000_reset(d);
1126 }
1127
1128 static void e1000_register_devices(void)
1129 {
1130 pci_qdev_register("e1000", sizeof(E1000State), pci_e1000_init);
1131 }
1132
1133 device_init(e1000_register_devices)