1 /*
2 * QEMU e1000 emulation
3 *
4 * Software developer's manual:
5 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6 *
7 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8 * Copyright (c) 2008 Qumranet
9 * Based on work done by:
10 * Copyright (c) 2007 Dan Aloni
11 * Copyright (c) 2004 Antony T Curtis
12 *
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
17 *
18 * This library is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
22 *
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 */
26
27
28 #include "hw.h"
29 #include "pci.h"
30 #include "net.h"
31 #include "net/checksum.h"
32 #include "loader.h"
33 #include "sysemu.h"
34
35 #include "e1000_hw.h"
36
37 #define E1000_DEBUG
38
39 #ifdef E1000_DEBUG
40 enum {
41 DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
42 DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
43 DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
44 DEBUG_RXFILTER, DEBUG_NOTYET,
45 };
46 #define DBGBIT(x) (1<<DEBUG_##x)
47 static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);
48
49 #define DBGOUT(what, fmt, ...) do { \
50 if (debugflags & DBGBIT(what)) \
51 fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
52 } while (0)
53 #else
54 #define DBGOUT(what, fmt, ...) do {} while (0)
55 #endif
56
57 #define IOPORT_SIZE 0x40
58 #define PNPMMIO_SIZE 0x20000
59 #define MIN_BUF_SIZE 60 /* Min. octets in an Ethernet frame sans FCS */
60
61 /*
62 * HW models:
63 * E1000_DEV_ID_82540EM works with Windows and Linux
64 * E1000_DEV_ID_82573L OK with Windows and Linux 2.6.22,
65 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
66 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
67 * Others never tested
68 */
69 enum { E1000_DEVID = E1000_DEV_ID_82540EM };
70
71 /*
72 * May need to specify additional MAC-to-PHY entries --
73 * Intel's Windows driver refuses to initialize unless they match
74 */
75 enum {
76 PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
77 E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
78 /* default to E1000_DEV_ID_82540EM */ 0xc20
79 };
80
81 typedef struct E1000State_st {
82 PCIDevice dev;
83 NICState *nic;
84 NICConf conf;
85 MemoryRegion mmio;
86 MemoryRegion io;
87
88 uint32_t mac_reg[0x8000];
89 uint16_t phy_reg[0x20];
90 uint16_t eeprom_data[64];
91
92 uint32_t rxbuf_size;
93 uint32_t rxbuf_min_shift;
94 int check_rxov;
95 struct e1000_tx {
96 unsigned char header[256];
97 unsigned char vlan_header[4];
98 /* Fields vlan and data must not be reordered or separated. */
99 unsigned char vlan[4];
100 unsigned char data[0x10000];
101 uint16_t size;
102 unsigned char sum_needed;
103 unsigned char vlan_needed;
104 uint8_t ipcss;
105 uint8_t ipcso;
106 uint16_t ipcse;
107 uint8_t tucss;
108 uint8_t tucso;
109 uint16_t tucse;
110 uint8_t hdr_len;
111 uint16_t mss;
112 uint32_t paylen;
113 uint16_t tso_frames;
114 char tse;
115 int8_t ip;
116 int8_t tcp;
117 char cptse; // current packet tse bit
118 } tx;
119
120 struct {
121 uint32_t val_in; // shifted in from guest driver
122 uint16_t bitnum_in;
123 uint16_t bitnum_out;
124 uint16_t reading;
125 uint32_t old_eecd;
126 } eecd_state;
127 } E1000State;
128
129 #define defreg(x) x = (E1000_##x>>2)
130 enum {
131 defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
132 defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
133 defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
134 defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
135 defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
136 defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
137 defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
138 defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
139 defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
140 defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
141 defreg(VET),
142 };
143
144 enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
145 static const char phy_regcap[0x20] = {
146 [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
147 [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
148 [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
149 [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
150 [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
151 [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
152 };
153
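/*
 * Replace ICR/ICS with the given cause bits (tagging INT_ASSERTED when
 * non-zero) and assert or deassert the PCI interrupt from IMS & ICR.
 */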
154 static void
155 set_interrupt_cause(E1000State *s, int index, uint32_t val)
156 {
157 if (val)
158 val |= E1000_ICR_INT_ASSERTED;
159 s->mac_reg[ICR] = val;
160 s->mac_reg[ICS] = val;
161 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
162 }
163
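/* OR additional cause bits into the current ICR and re-evaluate the IRQ. */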
164 static void
165 set_ics(E1000State *s, int index, uint32_t val)
166 {
167 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
168 s->mac_reg[IMS]);
169 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
170 }
171
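/*
 * Decode the RCTL buffer-size bits (BSEX plus SZ_*) into a byte count;
 * unrecognized combinations fall back to the 2048-byte default.
 */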
172 static int
173 rxbufsize(uint32_t v)
174 {
175 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
176 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
177 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
178 switch (v) {
179 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
180 return 16384;
181 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
182 return 8192;
183 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
184 return 4096;
185 case E1000_RCTL_SZ_1024:
186 return 1024;
187 case E1000_RCTL_SZ_512:
188 return 512;
189 case E1000_RCTL_SZ_256:
190 return 256;
191 }
192 return 2048;
193 }
194
195 static void
196 set_ctrl(E1000State *s, int index, uint32_t val)
197 {
198 /* RST is self clearing */
199 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
200 }
201
202 static void
203 set_rx_control(E1000State *s, int index, uint32_t val)
204 {
205 s->mac_reg[RCTL] = val;
206 s->rxbuf_size = rxbufsize(val);
207 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
208 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
209 s->mac_reg[RCTL]);
210 }
211
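/*
 * Emulate MDIO management access through the MDIC register: only PHY
 * address 1 exists, accesses are checked against phy_regcap[], failures
 * set E1000_MDIC_ERROR, and completion sets READY and raises MDAC.
 */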
212 static void
213 set_mdic(E1000State *s, int index, uint32_t val)
214 {
215 uint32_t data = val & E1000_MDIC_DATA_MASK;
216 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
217
218 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
219 val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
220 else if (val & E1000_MDIC_OP_READ) {
221 DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
222 if (!(phy_regcap[addr] & PHY_R)) {
223 DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
224 val |= E1000_MDIC_ERROR;
225 } else
226 val = (val ^ data) | s->phy_reg[addr];
227 } else if (val & E1000_MDIC_OP_WRITE) {
228 DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
229 if (!(phy_regcap[addr] & PHY_W)) {
230 DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
231 val |= E1000_MDIC_ERROR;
232 } else
233 s->phy_reg[addr] = data;
234 }
235 s->mac_reg[MDIC] = val | E1000_MDIC_READY;
236 set_ics(s, 0, E1000_ICR_MDAC);
237 }
238
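/*
 * The guest reads the EEPROM by bit-banging the Microwire protocol via
 * EECD: set_eecd() samples DI on rising SK edges to collect the opcode
 * and word address, get_eecd() shifts the addressed word out on DO,
 * most significant bit first.
 */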
239 static uint32_t
240 get_eecd(E1000State *s, int index)
241 {
242 uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;
243
244 DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
245 s->eecd_state.bitnum_out, s->eecd_state.reading);
246 if (!s->eecd_state.reading ||
247 ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
248 ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
249 ret |= E1000_EECD_DO;
250 return ret;
251 }
252
253 static void
254 set_eecd(E1000State *s, int index, uint32_t val)
255 {
256 uint32_t oldval = s->eecd_state.old_eecd;
257
258 s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
259 E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
260 if (!(E1000_EECD_CS & val)) // CS inactive; nothing to do
261 return;
262         if (E1000_EECD_CS & (val ^ oldval)) {    // CS rising edge; reset state
263 s->eecd_state.val_in = 0;
264 s->eecd_state.bitnum_in = 0;
265 s->eecd_state.bitnum_out = 0;
266 s->eecd_state.reading = 0;
267 }
268 if (!(E1000_EECD_SK & (val ^ oldval))) // no clock edge
269 return;
270 if (!(E1000_EECD_SK & val)) { // falling edge
271 s->eecd_state.bitnum_out++;
272 return;
273 }
274 s->eecd_state.val_in <<= 1;
275 if (val & E1000_EECD_DI)
276 s->eecd_state.val_in |= 1;
277 if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
278 s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
279 s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
280 EEPROM_READ_OPCODE_MICROWIRE);
281 }
282 DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
283 s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
284 s->eecd_state.reading);
285 }
286
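/*
 * EERD-based EEPROM reads: once the guest sets the START bit, return the
 * addressed word in the data field together with the DONE bit.
 */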
287 static uint32_t
288 flash_eerd_read(E1000State *s, int x)
289 {
290 unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
291
292 if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
293 return (s->mac_reg[EERD]);
294
295 if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
296 return (E1000_EEPROM_RW_REG_DONE | r);
297
298 return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
299 E1000_EEPROM_RW_REG_DONE | r);
300 }
301
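/*
 * Compute the 16-bit Internet checksum over data[css..cse] (to the end of
 * the packet when cse is 0) and store it big-endian at offset sloc.
 */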
302 static void
303 putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
304 {
305 uint32_t sum;
306
307 if (cse && cse < n)
308 n = cse + 1;
309 if (sloc < n-1) {
310 sum = net_checksum_add(n-css, data+css);
311 cpu_to_be16wu((uint16_t *)(data + sloc),
312 net_checksum_finish(sum));
313 }
314 }
315
316 static inline int
317 vlan_enabled(E1000State *s)
318 {
319 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
320 }
321
322 static inline int
323 vlan_rx_filter_enabled(E1000State *s)
324 {
325 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
326 }
327
328 static inline int
329 is_vlan_packet(E1000State *s, const uint8_t *buf)
330 {
331 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
332 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
333 }
334
335 static inline int
336 is_vlan_txd(uint32_t txd_lower)
337 {
338 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
339 }
340
341 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
342  * fill it in, so we just pad the descriptor length by 4 bytes unless the
343  * guest told us to strip it off the packet. */
344 static inline int
345 fcs_len(E1000State *s)
346 {
347 return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
348 }
349
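/*
 * Send one frame or TSO segment: patch the IP length/identification and
 * TCP sequence number for the current segment, apply the requested IP and
 * TCP/UDP checksum offloads, re-insert the VLAN header if needed, hand the
 * packet to the backend and update the transmit statistics counters.
 */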
350 static void
351 xmit_seg(E1000State *s)
352 {
353 uint16_t len, *sp;
354 unsigned int frames = s->tx.tso_frames, css, sofar, n;
355 struct e1000_tx *tp = &s->tx;
356
357 if (tp->tse && tp->cptse) {
358 css = tp->ipcss;
359 DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
360 frames, tp->size, css);
361 if (tp->ip) { // IPv4
362 cpu_to_be16wu((uint16_t *)(tp->data+css+2),
363 tp->size - css);
364 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
365 be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
366 } else // IPv6
367 cpu_to_be16wu((uint16_t *)(tp->data+css+4),
368 tp->size - css);
369 css = tp->tucss;
370 len = tp->size - css;
371 DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
372 if (tp->tcp) {
373 sofar = frames * tp->mss;
374 cpu_to_be32wu((uint32_t *)(tp->data+css+4), // seq
375 be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
376 if (tp->paylen - sofar > tp->mss)
377 tp->data[css + 13] &= ~9; // PSH, FIN
378 } else // UDP
379 cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
380 if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
381 unsigned int phsum;
382 // add pseudo-header length before checksum calculation
383 sp = (uint16_t *)(tp->data + tp->tucso);
384 phsum = be16_to_cpup(sp) + len;
385 phsum = (phsum >> 16) + (phsum & 0xffff);
386 cpu_to_be16wu(sp, phsum);
387 }
388 tp->tso_frames++;
389 }
390
391 if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
392 putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
393 if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
394 putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
395 if (tp->vlan_needed) {
396 memmove(tp->vlan, tp->data, 4);
397 memmove(tp->data, tp->data + 4, 8);
398 memcpy(tp->data + 8, tp->vlan_header, 4);
399 qemu_send_packet(&s->nic->nc, tp->vlan, tp->size + 4);
400 } else
401 qemu_send_packet(&s->nic->nc, tp->data, tp->size);
402 s->mac_reg[TPT]++;
403 s->mac_reg[GPTC]++;
404 n = s->mac_reg[TOTL];
405 if ((s->mac_reg[TOTL] += s->tx.size) < n)
406 s->mac_reg[TOTH]++;
407 }
408
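/*
 * Process one TX ring entry: a context descriptor loads the offload
 * parameters into s->tx, while data and legacy descriptors accumulate
 * payload in tp->data, segmenting at mss+hdr_len boundaries when TSO is
 * active, and transmit the frame once EOP is seen.
 */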
409 static void
410 process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
411 {
412 uint32_t txd_lower = le32_to_cpu(dp->lower.data);
413 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
414 unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
415 unsigned int msh = 0xfffff, hdr = 0;
416 uint64_t addr;
417 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
418 struct e1000_tx *tp = &s->tx;
419
420 if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
421 op = le32_to_cpu(xp->cmd_and_length);
422 tp->ipcss = xp->lower_setup.ip_fields.ipcss;
423 tp->ipcso = xp->lower_setup.ip_fields.ipcso;
424 tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
425 tp->tucss = xp->upper_setup.tcp_fields.tucss;
426 tp->tucso = xp->upper_setup.tcp_fields.tucso;
427 tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
428 tp->paylen = op & 0xfffff;
429 tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
430 tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
431 tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
432 tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
433 tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
434 tp->tso_frames = 0;
435 if (tp->tucso == 0) { // this is probably wrong
436 DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
437 tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
438 }
439 return;
440 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
441 // data descriptor
442 if (tp->size == 0) {
443 tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
444 }
445 tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
446 } else {
447 // legacy descriptor
448 tp->cptse = 0;
449 }
450
451 if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
452 (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
453 tp->vlan_needed = 1;
454 cpu_to_be16wu((uint16_t *)(tp->vlan_header),
455 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
456 cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
457 le16_to_cpu(dp->upper.fields.special));
458 }
459
460 addr = le64_to_cpu(dp->buffer_addr);
461 if (tp->tse && tp->cptse) {
462 hdr = tp->hdr_len;
463 msh = hdr + tp->mss;
464 do {
465 bytes = split_size;
466 if (tp->size + bytes > msh)
467 bytes = msh - tp->size;
468 cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
469 if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
470 memmove(tp->header, tp->data, hdr);
471 tp->size = sz;
472 addr += bytes;
473 if (sz == msh) {
474 xmit_seg(s);
475 memmove(tp->data, tp->header, hdr);
476 tp->size = hdr;
477 }
478 } while (split_size -= bytes);
479 } else if (!tp->tse && tp->cptse) {
480 // context descriptor TSE is not set, while data descriptor TSE is set
481         DBGOUT(TXERR, "TCP segmentation error\n");
482 } else {
483 cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
484 tp->size += split_size;
485 }
486
487 if (!(txd_lower & E1000_TXD_CMD_EOP))
488 return;
489 if (!(tp->tse && tp->cptse && tp->size < hdr))
490 xmit_seg(s);
491 tp->tso_frames = 0;
492 tp->sum_needed = 0;
493 tp->vlan_needed = 0;
494 tp->size = 0;
495 tp->cptse = 0;
496 }
497
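/*
 * If the descriptor asked for a status report (RS/RPS), set DD in its
 * upper dword and write it back to guest memory; returns the TXDW cause
 * bit to accumulate, or 0.
 */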
498 static uint32_t
499 txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
500 {
501 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
502
503 if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
504 return 0;
505 txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
506 ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
507 dp->upper.data = cpu_to_le32(txd_upper);
508 cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
509 (void *)&dp->upper, sizeof(dp->upper));
510 return E1000_ICR_TXDW;
511 }
512
513 static uint64_t tx_desc_base(E1000State *s)
514 {
515 uint64_t bah = s->mac_reg[TDBAH];
516 uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
517
518 return (bah << 32) + bal;
519 }
520
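/*
 * Drain the TX ring: process descriptors from TDH up to TDT, write back
 * status where requested, then raise the accumulated interrupt causes.
 */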
521 static void
522 start_xmit(E1000State *s)
523 {
524 target_phys_addr_t base;
525 struct e1000_tx_desc desc;
526 uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;
527
528 if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
529 DBGOUT(TX, "tx disabled\n");
530 return;
531 }
532
533 while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
534 base = tx_desc_base(s) +
535 sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
536 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
537
538 DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
539 (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
540 desc.upper.data);
541
542 process_tx_desc(s, &desc);
543 cause |= txdesc_writeback(base, &desc);
544
545 if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
546 s->mac_reg[TDH] = 0;
547 /*
548          * This can only happen if the guest software assigns
549          * bogus values to TDT/TDLEN;
550          * there's nothing sensible we can do about it.
551 */
552 if (s->mac_reg[TDH] == tdh_start) {
553 DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
554 tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
555 break;
556 }
557 }
558 set_ics(s, 0, cause);
559 }
560
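/*
 * Return nonzero if the frame passes the receive filters: VLAN filter
 * table, unicast/multicast promiscuous modes, broadcast acceptance, the
 * receive address (RA) registers and the multicast table array (MTA).
 */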
561 static int
562 receive_filter(E1000State *s, const uint8_t *buf, int size)
563 {
564 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
565 static const int mta_shift[] = {4, 3, 2, 0};
566 uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;
567
568 if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
569 uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
570 uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
571 ((vid >> 5) & 0x7f));
572 if ((vfta & (1 << (vid & 0x1f))) == 0)
573 return 0;
574 }
575
576 if (rctl & E1000_RCTL_UPE) // promiscuous
577 return 1;
578
579 if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE)) // promiscuous mcast
580 return 1;
581
582 if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
583 return 1;
584
585 for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
586 if (!(rp[1] & E1000_RAH_AV))
587 continue;
588 ra[0] = cpu_to_le32(rp[0]);
589 ra[1] = cpu_to_le32(rp[1]);
590 if (!memcmp(buf, (uint8_t *)ra, 6)) {
591 DBGOUT(RXFILTER,
592 "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
593 (int)(rp - s->mac_reg - RA)/2,
594 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
595 return 1;
596 }
597 }
598 DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
599 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
600
601 f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
602 f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
603 if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
604 return 1;
605 DBGOUT(RXFILTER,
606 "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
607 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
608 (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
609 s->mac_reg[MTA + (f >> 5)]);
610
611 return 0;
612 }
613
614 static void
615 e1000_set_link_status(VLANClientState *nc)
616 {
617 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
618 uint32_t old_status = s->mac_reg[STATUS];
619
620 if (nc->link_down) {
621 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
622 s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
623 } else {
624 s->mac_reg[STATUS] |= E1000_STATUS_LU;
625 s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
626 }
627
628 if (s->mac_reg[STATUS] != old_status)
629 set_ics(s, 0, E1000_ICR_LSC);
630 }
631
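/*
 * Check whether the RX ring has enough free descriptors to hold
 * total_size bytes at the configured per-descriptor buffer size.
 */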
632 static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
633 {
634 int bufs;
635 /* Fast-path short packets */
636 if (total_size <= s->rxbuf_size) {
637 return s->mac_reg[RDH] != s->mac_reg[RDT] || !s->check_rxov;
638 }
639 if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
640 bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
641 } else if (s->mac_reg[RDH] > s->mac_reg[RDT] || !s->check_rxov) {
642 bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
643 s->mac_reg[RDT] - s->mac_reg[RDH];
644 } else {
645 return false;
646 }
647 return total_size <= bufs * s->rxbuf_size;
648 }
649
650 static int
651 e1000_can_receive(VLANClientState *nc)
652 {
653 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
654
655 return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
656 }
657
658 static uint64_t rx_desc_base(E1000State *s)
659 {
660 uint64_t bah = s->mac_reg[RDBAH];
661 uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
662
663 return (bah << 32) + bal;
664 }
665
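/*
 * Receive a frame from the backend: pad runts to the minimum Ethernet
 * length, run the receive filters, strip the VLAN tag when enabled,
 * scatter the data across RX descriptors and raise RXT0/RXDMT0, or RXO
 * when the ring overflows.
 */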
666 static ssize_t
667 e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
668 {
669 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
670 struct e1000_rx_desc desc;
671 target_phys_addr_t base;
672 unsigned int n, rdt;
673 uint32_t rdh_start;
674 uint16_t vlan_special = 0;
675 uint8_t vlan_status = 0, vlan_offset = 0;
676 uint8_t min_buf[MIN_BUF_SIZE];
677 size_t desc_offset;
678 size_t desc_size;
679 size_t total_size;
680
681 if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
682 return -1;
683
684 /* Pad to minimum Ethernet frame length */
685 if (size < sizeof(min_buf)) {
686 memcpy(min_buf, buf, size);
687 memset(&min_buf[size], 0, sizeof(min_buf) - size);
688 buf = min_buf;
689 size = sizeof(min_buf);
690 }
691
692 if (!receive_filter(s, buf, size))
693 return size;
694
695 if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
696 vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
697 memmove((uint8_t *)buf + 4, buf, 12);
698 vlan_status = E1000_RXD_STAT_VP;
699 vlan_offset = 4;
700 size -= 4;
701 }
702
703 rdh_start = s->mac_reg[RDH];
704 desc_offset = 0;
705 total_size = size + fcs_len(s);
706 if (!e1000_has_rxbufs(s, total_size)) {
707 set_ics(s, 0, E1000_ICS_RXO);
708 return -1;
709 }
710 do {
711 desc_size = total_size - desc_offset;
712 if (desc_size > s->rxbuf_size) {
713 desc_size = s->rxbuf_size;
714 }
715 base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
716 cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
717 desc.special = vlan_special;
718 desc.status |= (vlan_status | E1000_RXD_STAT_DD);
719 if (desc.buffer_addr) {
720 if (desc_offset < size) {
721 size_t copy_size = size - desc_offset;
722 if (copy_size > s->rxbuf_size) {
723 copy_size = s->rxbuf_size;
724 }
725 cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
726 (void *)(buf + desc_offset + vlan_offset),
727 copy_size);
728 }
729 desc_offset += desc_size;
730 desc.length = cpu_to_le16(desc_size);
731 if (desc_offset >= total_size) {
732 desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
733 } else {
734 /* Guest zeroing out status is not a hardware requirement.
735 Clear EOP in case guest didn't do it. */
736 desc.status &= ~E1000_RXD_STAT_EOP;
737 }
738         } else { // as per Intel docs; skip descriptors with null buf addr
739 DBGOUT(RX, "Null RX descriptor!!\n");
740 }
741 cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));
742
743 if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
744 s->mac_reg[RDH] = 0;
745 s->check_rxov = 1;
746 /* see comment in start_xmit; same here */
747 if (s->mac_reg[RDH] == rdh_start) {
748 DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
749 rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
750 set_ics(s, 0, E1000_ICS_RXO);
751 return -1;
752 }
753 } while (desc_offset < total_size);
754
755 s->mac_reg[GPRC]++;
756 s->mac_reg[TPR]++;
757 /* TOR - Total Octets Received:
758 * This register includes bytes received in a packet from the <Destination
759 * Address> field through the <CRC> field, inclusively.
760 */
761 n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
762 if (n < s->mac_reg[TORL])
763 s->mac_reg[TORH]++;
764 s->mac_reg[TORL] = n;
765
766 n = E1000_ICS_RXT0;
767 if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
768 rdt += s->mac_reg[RDLEN] / sizeof(desc);
769 if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
770 s->rxbuf_min_shift)
771 n |= E1000_ICS_RXDMT0;
772
773 set_ics(s, 0, n);
774
775 return size;
776 }
777
778 static uint32_t
779 mac_readreg(E1000State *s, int index)
780 {
781 return s->mac_reg[index];
782 }
783
784 static uint32_t
785 mac_icr_read(E1000State *s, int index)
786 {
787 uint32_t ret = s->mac_reg[ICR];
788
789 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
790 set_interrupt_cause(s, 0, 0);
791 return ret;
792 }
793
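/*
 * Statistics registers are clear-on-read; the 64-bit counters (TOR, TOT)
 * also clear their low half when the high half is read via mac_read_clr8.
 */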
794 static uint32_t
795 mac_read_clr4(E1000State *s, int index)
796 {
797 uint32_t ret = s->mac_reg[index];
798
799 s->mac_reg[index] = 0;
800 return ret;
801 }
802
803 static uint32_t
804 mac_read_clr8(E1000State *s, int index)
805 {
806 uint32_t ret = s->mac_reg[index];
807
808 s->mac_reg[index] = 0;
809 s->mac_reg[index-1] = 0;
810 return ret;
811 }
812
813 static void
814 mac_writereg(E1000State *s, int index, uint32_t val)
815 {
816 s->mac_reg[index] = val;
817 }
818
819 static void
820 set_rdt(E1000State *s, int index, uint32_t val)
821 {
822 s->check_rxov = 0;
823 s->mac_reg[index] = val & 0xffff;
824 }
825
826 static void
827 set_16bit(E1000State *s, int index, uint32_t val)
828 {
829 s->mac_reg[index] = val & 0xffff;
830 }
831
832 static void
833 set_dlen(E1000State *s, int index, uint32_t val)
834 {
835 s->mac_reg[index] = val & 0xfff80;
836 }
837
838 static void
839 set_tctl(E1000State *s, int index, uint32_t val)
840 {
841 s->mac_reg[index] = val;
842 s->mac_reg[TDT] &= 0xffff;
843 start_xmit(s);
844 }
845
846 static void
847 set_icr(E1000State *s, int index, uint32_t val)
848 {
849 DBGOUT(INTERRUPT, "set_icr %x\n", val);
850 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
851 }
852
853 static void
854 set_imc(E1000State *s, int index, uint32_t val)
855 {
856 s->mac_reg[IMS] &= ~val;
857 set_ics(s, 0, 0);
858 }
859
860 static void
861 set_ims(E1000State *s, int index, uint32_t val)
862 {
863 s->mac_reg[IMS] |= val;
864 set_ics(s, 0, 0);
865 }
866
867 #define getreg(x) [x] = mac_readreg
868 static uint32_t (*macreg_readops[])(E1000State *, int) = {
869 getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
870 getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
871 getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
872 getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
873 getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
874 getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
875 getreg(TDLEN), getreg(RDLEN),
876
877 [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
878 [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
879 [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
880 [CRCERRS ... MPC] = &mac_readreg,
881 [RA ... RA+31] = &mac_readreg,
882 [MTA ... MTA+127] = &mac_readreg,
883 [VFTA ... VFTA+127] = &mac_readreg,
884 };
885 enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
886
887 #define putreg(x) [x] = mac_writereg
888 static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
889 putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
890 putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
891 putreg(RDBAL), putreg(LEDCTL), putreg(VET),
892 [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
893 [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
894 [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
895 [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
896 [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
897 [RA ... RA+31] = &mac_writereg,
898 [MTA ... MTA+127] = &mac_writereg,
899 [VFTA ... VFTA+127] = &mac_writereg,
900 };
901 enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
902
903 static void
904 e1000_mmio_write(void *opaque, target_phys_addr_t addr, uint64_t val,
905 unsigned size)
906 {
907 E1000State *s = opaque;
908 unsigned int index = (addr & 0x1ffff) >> 2;
909
910 if (index < NWRITEOPS && macreg_writeops[index]) {
911 macreg_writeops[index](s, index, val);
912 } else if (index < NREADOPS && macreg_readops[index]) {
913 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
914 } else {
915 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
916 index<<2, val);
917 }
918 }
919
920 static uint64_t
921 e1000_mmio_read(void *opaque, target_phys_addr_t addr, unsigned size)
922 {
923 E1000State *s = opaque;
924 unsigned int index = (addr & 0x1ffff) >> 2;
925
926 if (index < NREADOPS && macreg_readops[index])
927 {
928 return macreg_readops[index](s, index);
929 }
930 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
931 return 0;
932 }
933
934 static const MemoryRegionOps e1000_mmio_ops = {
935 .read = e1000_mmio_read,
936 .write = e1000_mmio_write,
937 .endianness = DEVICE_LITTLE_ENDIAN,
938 .impl = {
939 .min_access_size = 4,
940 .max_access_size = 4,
941 },
942 };
943
944 static uint64_t e1000_io_read(void *opaque, target_phys_addr_t addr,
945 unsigned size)
946 {
947 E1000State *s = opaque;
948
949 (void)s;
950 return 0;
951 }
952
953 static void e1000_io_write(void *opaque, target_phys_addr_t addr,
954 uint64_t val, unsigned size)
955 {
956 E1000State *s = opaque;
957
958 (void)s;
959 }
960
961 static const MemoryRegionOps e1000_io_ops = {
962 .read = e1000_io_read,
963 .write = e1000_io_write,
964 .endianness = DEVICE_LITTLE_ENDIAN,
965 };
966
967 static bool is_version_1(void *opaque, int version_id)
968 {
969 return version_id == 1;
970 }
971
972 static const VMStateDescription vmstate_e1000 = {
973 .name = "e1000",
974 .version_id = 2,
975 .minimum_version_id = 1,
976 .minimum_version_id_old = 1,
977 .fields = (VMStateField []) {
978 VMSTATE_PCI_DEVICE(dev, E1000State),
979 VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
980 VMSTATE_UNUSED(4), /* Was mmio_base. */
981 VMSTATE_UINT32(rxbuf_size, E1000State),
982 VMSTATE_UINT32(rxbuf_min_shift, E1000State),
983 VMSTATE_UINT32(eecd_state.val_in, E1000State),
984 VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
985 VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
986 VMSTATE_UINT16(eecd_state.reading, E1000State),
987 VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
988 VMSTATE_UINT8(tx.ipcss, E1000State),
989 VMSTATE_UINT8(tx.ipcso, E1000State),
990 VMSTATE_UINT16(tx.ipcse, E1000State),
991 VMSTATE_UINT8(tx.tucss, E1000State),
992 VMSTATE_UINT8(tx.tucso, E1000State),
993 VMSTATE_UINT16(tx.tucse, E1000State),
994 VMSTATE_UINT32(tx.paylen, E1000State),
995 VMSTATE_UINT8(tx.hdr_len, E1000State),
996 VMSTATE_UINT16(tx.mss, E1000State),
997 VMSTATE_UINT16(tx.size, E1000State),
998 VMSTATE_UINT16(tx.tso_frames, E1000State),
999 VMSTATE_UINT8(tx.sum_needed, E1000State),
1000 VMSTATE_INT8(tx.ip, E1000State),
1001 VMSTATE_INT8(tx.tcp, E1000State),
1002 VMSTATE_BUFFER(tx.header, E1000State),
1003 VMSTATE_BUFFER(tx.data, E1000State),
1004 VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
1005 VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
1006 VMSTATE_UINT32(mac_reg[CTRL], E1000State),
1007 VMSTATE_UINT32(mac_reg[EECD], E1000State),
1008 VMSTATE_UINT32(mac_reg[EERD], E1000State),
1009 VMSTATE_UINT32(mac_reg[GPRC], E1000State),
1010 VMSTATE_UINT32(mac_reg[GPTC], E1000State),
1011 VMSTATE_UINT32(mac_reg[ICR], E1000State),
1012 VMSTATE_UINT32(mac_reg[ICS], E1000State),
1013 VMSTATE_UINT32(mac_reg[IMC], E1000State),
1014 VMSTATE_UINT32(mac_reg[IMS], E1000State),
1015 VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
1016 VMSTATE_UINT32(mac_reg[MANC], E1000State),
1017 VMSTATE_UINT32(mac_reg[MDIC], E1000State),
1018 VMSTATE_UINT32(mac_reg[MPC], E1000State),
1019 VMSTATE_UINT32(mac_reg[PBA], E1000State),
1020 VMSTATE_UINT32(mac_reg[RCTL], E1000State),
1021 VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
1022 VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
1023 VMSTATE_UINT32(mac_reg[RDH], E1000State),
1024 VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
1025 VMSTATE_UINT32(mac_reg[RDT], E1000State),
1026 VMSTATE_UINT32(mac_reg[STATUS], E1000State),
1027 VMSTATE_UINT32(mac_reg[SWSM], E1000State),
1028 VMSTATE_UINT32(mac_reg[TCTL], E1000State),
1029 VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
1030 VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
1031 VMSTATE_UINT32(mac_reg[TDH], E1000State),
1032 VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
1033 VMSTATE_UINT32(mac_reg[TDT], E1000State),
1034 VMSTATE_UINT32(mac_reg[TORH], E1000State),
1035 VMSTATE_UINT32(mac_reg[TORL], E1000State),
1036 VMSTATE_UINT32(mac_reg[TOTH], E1000State),
1037 VMSTATE_UINT32(mac_reg[TOTL], E1000State),
1038 VMSTATE_UINT32(mac_reg[TPR], E1000State),
1039 VMSTATE_UINT32(mac_reg[TPT], E1000State),
1040 VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
1041 VMSTATE_UINT32(mac_reg[WUFC], E1000State),
1042 VMSTATE_UINT32(mac_reg[VET], E1000State),
1043 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
1044 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
1045 VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
1046 VMSTATE_END_OF_LIST()
1047 }
1048 };
1049
1050 static const uint16_t e1000_eeprom_template[64] = {
1051 0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
1052 0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
1053 0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
1054 0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
1055 0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
1056 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1057 0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
1058 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
1059 };
1060
1061 static const uint16_t phy_reg_init[] = {
1062 [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
1063 [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
1064 [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
1065 [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
1066 [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
1067 [M88E1000_PHY_SPEC_STATUS] = 0xac00,
1068 };
1069
1070 static const uint32_t mac_reg_init[] = {
1071 [PBA] = 0x00100030,
1072 [LEDCTL] = 0x602,
1073 [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
1074 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
1075 [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
1076 E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
1077 E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
1078 E1000_STATUS_LU,
1079 [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
1080 E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
1081 E1000_MANC_RMCP_EN,
1082 };
1083
1084 /* PCI interface */
1085
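/*
 * Register the MMIO and I/O port regions. MMIO write coalescing is enabled
 * for everything except the registers listed in excluded_regs, whose writes
 * must take effect immediately.
 */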
1086 static void
1087 e1000_mmio_setup(E1000State *d)
1088 {
1089 int i;
1090 const uint32_t excluded_regs[] = {
1091 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1092 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1093 };
1094
1095 memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
1096 PNPMMIO_SIZE);
1097 memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1098 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1099 memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1100 excluded_regs[i+1] - excluded_regs[i] - 4);
1101 memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1102 }
1103
1104 static void
1105 e1000_cleanup(VLANClientState *nc)
1106 {
1107 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
1108
1109 s->nic = NULL;
1110 }
1111
1112 static int
1113 pci_e1000_uninit(PCIDevice *dev)
1114 {
1115 E1000State *d = DO_UPCAST(E1000State, dev, dev);
1116
1117 memory_region_destroy(&d->mmio);
1118 memory_region_destroy(&d->io);
1119 qemu_del_vlan_client(&d->nic->nc);
1120 return 0;
1121 }
1122
1123 static void e1000_reset(void *opaque)
1124 {
1125 E1000State *d = opaque;
1126
1127 memset(d->phy_reg, 0, sizeof d->phy_reg);
1128 memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
1129 memset(d->mac_reg, 0, sizeof d->mac_reg);
1130 memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
1131 d->rxbuf_min_shift = 1;
1132 memset(&d->tx, 0, sizeof d->tx);
1133 }
1134
1135 static NetClientInfo net_e1000_info = {
1136 .type = NET_CLIENT_TYPE_NIC,
1137 .size = sizeof(NICState),
1138 .can_receive = e1000_can_receive,
1139 .receive = e1000_receive,
1140 .cleanup = e1000_cleanup,
1141 .link_status_changed = e1000_set_link_status,
1142 };
1143
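/*
 * qdev init: set up PCI config space and the MMIO/IO BARs, build the
 * EEPROM image (template plus MAC address and checksum), and create the
 * NIC backend.
 */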
1144 static int pci_e1000_init(PCIDevice *pci_dev)
1145 {
1146 E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
1147 uint8_t *pci_conf;
1148 uint16_t checksum = 0;
1149 int i;
1150 uint8_t *macaddr;
1151
1152 pci_conf = d->dev.config;
1153
1154 /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1155 pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1156
1157 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1158
1159 e1000_mmio_setup(d);
1160
1161 pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1162
1163 pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1164
1165 memmove(d->eeprom_data, e1000_eeprom_template,
1166 sizeof e1000_eeprom_template);
1167 qemu_macaddr_default_if_unset(&d->conf.macaddr);
1168 macaddr = d->conf.macaddr.a;
1169 for (i = 0; i < 3; i++)
1170 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1171 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1172 checksum += d->eeprom_data[i];
1173 checksum = (uint16_t) EEPROM_SUM - checksum;
1174 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1175
1176 d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1177 d->dev.qdev.info->name, d->dev.qdev.id, d);
1178
1179 qemu_format_nic_info_str(&d->nic->nc, macaddr);
1180
1181 add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");
1182
1183 return 0;
1184 }
1185
1186 static void qdev_e1000_reset(DeviceState *dev)
1187 {
1188 E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
1189 e1000_reset(d);
1190 }
1191
1192 static PCIDeviceInfo e1000_info = {
1193 .qdev.name = "e1000",
1194 .qdev.desc = "Intel Gigabit Ethernet",
1195 .qdev.size = sizeof(E1000State),
1196 .qdev.reset = qdev_e1000_reset,
1197 .qdev.vmsd = &vmstate_e1000,
1198 .init = pci_e1000_init,
1199 .exit = pci_e1000_uninit,
1200 .romfile = "pxe-e1000.rom",
1201 .vendor_id = PCI_VENDOR_ID_INTEL,
1202 .device_id = E1000_DEVID,
1203 .revision = 0x03,
1204 .class_id = PCI_CLASS_NETWORK_ETHERNET,
1205 .qdev.props = (Property[]) {
1206 DEFINE_NIC_PROPERTIES(E1000State, conf),
1207 DEFINE_PROP_END_OF_LIST(),
1208 }
1209 };
1210
1211 static void e1000_register_devices(void)
1212 {
1213 pci_qdev_register(&e1000_info);
1214 }
1215
1216 device_init(e1000_register_devices)