/*
 * QEMU e1000 emulation
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */


#include "hw.h"
#include "pci.h"
#include "net.h"
#include "net/checksum.h"
#include "loader.h"

#include "e1000_hw.h"

#define DEBUG

#ifdef DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE 0x40
#define PNPMMIO_SIZE 0x20000

/*
 * HW models:
 * E1000_DEV_ID_82540EM works with Windows and Linux
 * E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 *   appears to perform better than 82540EM, but breaks with Linux 2.6.18
 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 * Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */ 0xc20
};

typedef struct E1000State_st {
    PCIDevice dev;
    VLANClientState *vc;
    NICConf conf;
    int mmio_index;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    int check_rxov;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse; // current packet tse bit
    } tx;

    struct {
        uint32_t val_in; // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
} E1000State;

#define defreg(x) x = (E1000_##x>>2)
enum {
    defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
    defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
    defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
    defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
    defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
    defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
    defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
    defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
    defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
    defreg(RA), defreg(MTA), defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static void
ioport_map(PCIDevice *pci_dev, int region_num, pcibus_t addr,
           pcibus_t size, int type)
{
    DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
}

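/*
 * Latch a new interrupt cause into ICR/ICS and update the PCI INTx line:
 * the line is asserted whenever any cause bit left in ICR is also enabled
 * in the IMS mask.
 */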
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val)
        val |= E1000_ICR_INT_ASSERTED;
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

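/*
 * Translate the RCTL buffer-size bits (BSEX plus SZ_*) into a byte count,
 * defaulting to 2048 when no recognized combination is set.
 */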
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
}

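/*
 * Emulate the MDI control register: PHY register reads and writes are
 * checked against phy_regcap, completion is signalled by setting the READY
 * bit, and an MDAC interrupt cause is raised.
 */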
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            s->phy_reg[addr] = data;
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);
}

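/*
 * EECD read path of the bit-banged (Microwire) EEPROM interface: present
 * EEPROM data one bit at a time on the DO line, indexed by bitnum_out.
 */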
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

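/*
 * EECD write path: track SK clock edges driven by the guest, shift DI bits
 * into val_in, and after nine shifted-in bits decide whether a Microwire
 * READ opcode plus word address were sent and where output should start.
 */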
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) { // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    if (!(val & E1000_EECD_CS)) { // rising, no CS (EEPROM reset)
        memset(&s->eecd_state, 0, sizeof s->eecd_state);
        /*
         * restore old_eecd's E1000_EECD_SK (known to be on)
         * to avoid false detection of a clock edge
         */
        s->eecd_state.old_eecd = E1000_EECD_SK;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

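/*
 * EERD register read, the "fast" EEPROM read interface: once a read has
 * been started, return the requested word together with the DONE bit.
 */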
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}

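/*
 * Insert an Internet checksum at offset sloc, computed over data[css..n-1]
 * (optionally clamped by cse, the checksum-end offset from the descriptor).
 */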
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

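/*
 * Emit one segment of the current packet: for TSO, patch the IP total or
 * payload length, the IPv4 identification and the TCP sequence number for
 * this segment, then fill in the requested TCP/UDP and IP checksums,
 * prepend the VLAN tag if needed, and hand the frame to the network layer.
 */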
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) { // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
                          be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9; // PSH, FIN
        } else // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 12);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(s->vc, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}

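/*
 * Process one transmit descriptor. Context descriptors only update the
 * offload state (checksum offsets, TSO parameters); data and legacy
 * descriptors contribute buffer contents, which are segmented on the fly
 * when TSE is set and transmitted once EOP is reached.
 */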
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) { // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else
        // legacy descriptor
        tp->cptse = 0;

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;
            cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
                              (void *)&dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

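/*
 * Walk the transmit ring from TDH to TDT, processing and writing back each
 * descriptor, and raise the accumulated interrupt causes when done.
 */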
static void
start_xmit(E1000State *s)
{
    target_phys_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}

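/*
 * Receive-side filtering: VLAN filter table, promiscuous/broadcast modes,
 * perfect (RA) unicast match and the multicast hash table (MTA).
 * Returns non-zero if the frame should be accepted.
 */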
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE) // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(VLANClientState *vc)
{
    E1000State *s = vc->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (vc->link_down)
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    else
        s->mac_reg[STATUS] |= E1000_STATUS_LU;

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

static int
e1000_can_receive(VLANClientState *vc)
{
    E1000State *s = vc->opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN);
}

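/*
 * Receive one frame: apply filtering, strip the VLAN tag into the
 * descriptor's special field when VLAN mode is on, copy the data into
 * guest buffers along the RX ring and raise RXT0/RXDMT0/RXO causes
 * as appropriate.
 */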
static ssize_t
e1000_receive(VLANClientState *vc, const uint8_t *buf, size_t size)
{
    E1000State *s = vc->opaque;
    struct e1000_rx_desc desc;
    target_phys_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    if (size > s->rxbuf_size) {
        DBGOUT(RX, "packet too large for buffers (%lu > %d)\n",
               (unsigned long)size, s->rxbuf_size);
        return -1;
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((void *)(buf + 4), buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    size += 4; // for the header
    do {
        if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
        base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
               sizeof(desc) * s->mac_reg[RDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
                                      (void *)(buf + vlan_offset), size);
            desc.length = cpu_to_le16(size);
            desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
        } else // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        s->check_rxov = 1;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc.buffer_addr == 0);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    n = s->mac_reg[TORL];
    if ((s->mac_reg[TORL] += size) < n)
        s->mac_reg[TORH]++;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->check_rxov = 0;
    s->mac_reg[index] = val & 0xffff;
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x) [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
    getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
    getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x) [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
    [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
    [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
    [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
    [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

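/*
 * MMIO register access: each 32-bit register is dispatched through the
 * macreg_writeops/macreg_readops tables; byte and word accesses are
 * widened to the containing doubleword, since the emulated device has no
 * byte enables.
 */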
static void
e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

#ifdef TARGET_WORDS_BIGENDIAN
    val = bswap32(val);
#endif
    if (index < NWRITEOPS && macreg_writeops[index])
        macreg_writeops[index](s, index, val);
    else if (index < NREADOPS && macreg_readops[index])
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
    else
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
               index<<2, val);
}

static void
e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xffff) << (8*(addr & 3)));
}

static void
e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xff) << (8*(addr & 3)));
}

static uint32_t
e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index])
    {
        uint32_t val = macreg_readops[index](s, index);
#ifdef TARGET_WORDS_BIGENDIAN
        val = bswap32(val);
#endif
        return val;
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static uint32_t
e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;
}

static uint32_t
e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
}

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
             E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
             E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
             E1000_MANC_RMCP_EN,
};

/* PCI interface */

static CPUWriteMemoryFunc * const e1000_mmio_write[] = {
    e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
};

static CPUReadMemoryFunc * const e1000_mmio_read[] = {
    e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
};

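/*
 * Map BAR 0: register the MMIO region and mark it as coalesced MMIO,
 * except for registers whose writes need immediate side effects (the
 * interrupt and transmit registers listed in excluded_regs).
 */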
static void
e1000_mmio_map(PCIDevice *pci_dev, int region_num,
               pcibus_t addr, pcibus_t size, int type)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);

    cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
    qemu_register_coalesced_mmio(addr, excluded_regs[0]);

    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
}

static void
e1000_cleanup(VLANClientState *vc)
{
    E1000State *d = vc->opaque;

    d->vc = NULL;
}

static int
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    cpu_unregister_io_memory(d->mmio_index);
    qemu_del_vlan_client(d->vc);
    vmstate_unregister(&vmstate_e1000, d);
    return 0;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);
}

static int pci_e1000_init(PCIDevice *pci_dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = d->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
    pci_config_set_device_id(pci_conf, E1000_DEVID);
    *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
    *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
    pci_conf[0x08] = 0x03;
    pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
    pci_conf[0x0c] = 0x10;

    pci_conf[0x3d] = 1; // interrupt pin 0

    d->mmio_index = cpu_register_io_memory(e1000_mmio_read,
                                           e1000_mmio_write, d);

    pci_register_bar((PCIDevice *)d, 0, PNPMMIO_SIZE,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, e1000_mmio_map);

    pci_register_bar((PCIDevice *)d, 1, IOPORT_SIZE,
                     PCI_BASE_ADDRESS_SPACE_IO, ioport_map);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->vc = qemu_new_vlan_client(NET_CLIENT_TYPE_NIC,
                                 d->conf.vlan, d->conf.peer,
                                 d->dev.qdev.info->name, d->dev.qdev.id,
                                 e1000_can_receive, e1000_receive, NULL,
                                 NULL, e1000_cleanup, d);
    d->vc->link_status_changed = e1000_set_link_status;

    qemu_format_nic_info_str(d->vc, macaddr);

    vmstate_register(-1, &vmstate_e1000, d);

    if (!pci_dev->qdev.hotplugged) {
        static int loaded = 0;
        if (!loaded) {
            rom_add_option("pxe-e1000.bin");
            loaded = 1;
        }
    }
    return 0;
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
    e1000_reset(d);
}

static PCIDeviceInfo e1000_info = {
    .qdev.name = "e1000",
    .qdev.desc = "Intel Gigabit Ethernet",
    .qdev.size = sizeof(E1000State),
    .qdev.reset = qdev_e1000_reset,
    .init = pci_e1000_init,
    .exit = pci_e1000_uninit,
    .qdev.props = (Property[]) {
        DEFINE_NIC_PROPERTIES(E1000State, conf),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void e1000_register_devices(void)
{
    pci_qdev_register(&e1000_info);
}

device_init(e1000_register_devices)