]> git.proxmox.com Git - qemu.git/blob - hw/e1000.c
misc: move include files to include/qemu/
[qemu.git] / hw / e1000.c
1 /*
2 * QEMU e1000 emulation
3 *
4 * Software developer's manual:
5 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
6 *
7 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
8 * Copyright (c) 2008 Qumranet
9 * Based on work done by:
10 * Copyright (c) 2007 Dan Aloni
11 * Copyright (c) 2004 Antony T Curtis
12 *
13 * This library is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU Lesser General Public
15 * License as published by the Free Software Foundation; either
16 * version 2 of the License, or (at your option) any later version.
17 *
18 * This library is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * Lesser General Public License for more details.
22 *
23 * You should have received a copy of the GNU Lesser General Public
24 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
25 */
26
27
28 #include "hw.h"
29 #include "pci/pci.h"
30 #include "net/net.h"
31 #include "net/checksum.h"
32 #include "loader.h"
33 #include "sysemu.h"
34 #include "dma.h"
35
36 #include "e1000_hw.h"
37
/* Compile-time master switch for the conditional tracing below. */
#define E1000_DEBUG

#ifdef E1000_DEBUG
/* One bit position per traceable subsystem; combined via DBGBIT(). */
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
/* Default trace mask: only TX errors and general messages. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Trace to stderr when the DEBUG_<what> bit is set in debugflags. */
#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif
57
#define IOPORT_SIZE 0x40
#define PNPMMIO_SIZE 0x20000
#define MIN_BUF_SIZE 60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/*
 * HW models:
 * E1000_DEV_ID_82540EM works with Windows and Linux
 * E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 * appears to perform better than 82540EM, but breaks with Linux 2.6.18
 * E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 * Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
    E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
    /* default to E1000_DEV_ID_82540EM */ 0xc20
};
84
/* Per-instance device state. */
typedef struct E1000State_st {
    PCIDevice dev;
    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;              /* register window (e1000_mmio_ops) */
    MemoryRegion io;                /* I/O-port BAR (stubbed out) */

    uint32_t mac_reg[0x8000];       /* MAC registers, indexed by offset >> 2 */
    uint16_t phy_reg[0x20];         /* MII PHY registers (see set_mdic) */
    uint16_t eeprom_data[64];       /* 16-bit EEPROM words */

    uint32_t rxbuf_size;            /* per-descriptor RX buffer size (RCTL) */
    uint32_t rxbuf_min_shift;       /* low-threshold divisor from RCTL.RDMTS */
    /* In-flight transmit state, latched from context/data descriptors. */
    struct e1000_tx {
        unsigned char header[256];  /* saved TSO header, replayed per segment */
        unsigned char vlan_header[4]; /* 802.1Q tag to insert at send time */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;              /* bytes accumulated in data[] */
        unsigned char sum_needed;   /* POPTS bits: which checksums to insert */
        unsigned char vlan_needed;
        uint8_t ipcss;              /* IP checksum start */
        uint8_t ipcso;              /* IP checksum store offset */
        uint16_t ipcse;             /* IP checksum end (inclusive) */
        uint8_t tucss;              /* TCP/UDP checksum start */
        uint8_t tucso;              /* TCP/UDP checksum store offset */
        uint16_t tucse;             /* TCP/UDP checksum end (inclusive) */
        uint8_t hdr_len;            /* TSO header length */
        uint16_t mss;               /* TSO maximum segment payload */
        uint32_t paylen;            /* total TSO payload length */
        uint16_t tso_frames;        /* segments emitted so far */
        char tse;                   /* TSE flag from the context descriptor */
        int8_t ip;                  /* 1 = IPv4, 0 = IPv6 */
        int8_t tcp;                 /* 1 = TCP, 0 = UDP */
        char cptse; // current packet tse bit
    } tx;

    /* Microwire EEPROM bit-bang shift state (get_eecd/set_eecd). */
    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;     /* bits shifted in since CS rose */
        uint16_t bitnum_out;    /* next output bit index */
        uint16_t reading;       /* nonzero once a READ opcode was decoded */
        uint32_t old_eecd;      /* last EECD value written by the guest */
    } eecd_state;

    QEMUTimer *autoneg_timer;   /* completes PHY auto-negotiation (500 ms) */
} E1000State;
133
/*
 * Map each E1000_<reg> byte offset to its word index in mac_reg[]
 * (registers are 32 bits wide, hence the >> 2).
 */
#define defreg(x) x = (E1000_##x>>2)
enum {
    defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
    defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
    defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
    defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
    defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
    defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
    defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
    defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
    defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
    defreg(RA), defreg(MTA), defreg(CRCERRS),defreg(VFTA),
    defreg(VET),
};
148
149 static void
150 e1000_link_down(E1000State *s)
151 {
152 s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
153 s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
154 }
155
156 static void
157 e1000_link_up(E1000State *s)
158 {
159 s->mac_reg[STATUS] |= E1000_STATUS_LU;
160 s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
161 }
162
163 static void
164 set_phy_ctrl(E1000State *s, int index, uint16_t val)
165 {
166 if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
167 s->nic->nc.link_down = true;
168 e1000_link_down(s);
169 s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
170 DBGOUT(PHY, "Start link auto negotiation\n");
171 qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
172 }
173 }
174
175 static void
176 e1000_autoneg_timer(void *opaque)
177 {
178 E1000State *s = opaque;
179 s->nic->nc.link_down = false;
180 e1000_link_up(s);
181 s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
182 DBGOUT(PHY, "Auto negotiation is completed\n");
183 }
184
/* Per-PHY-register write hooks; only PHY_CTRL has side effects. */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };
190
/* Read/write capability of each PHY register; accesses to registers
 * without the matching bit fail with E1000_MDIC_ERROR (set_mdic). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R
};
200
/* Reset-time PHY register values, loaded by e1000_reset(). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
210
/* Reset-time MAC register values, loaded by e1000_reset(). */
static const uint32_t mac_reg_init[] = {
    [PBA] = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
        E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
        E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
        E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
        E1000_STATUS_LU,
    [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
        E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
        E1000_MANC_RMCP_EN,
};
224
225 static void
226 set_interrupt_cause(E1000State *s, int index, uint32_t val)
227 {
228 if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
229 /* Only for 8257x */
230 val |= E1000_ICR_INT_ASSERTED;
231 }
232 s->mac_reg[ICR] = val;
233 s->mac_reg[ICS] = val;
234 qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
235 }
236
237 static void
238 set_ics(E1000State *s, int index, uint32_t val)
239 {
240 DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
241 s->mac_reg[IMS]);
242 set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
243 }
244
245 static int
246 rxbufsize(uint32_t v)
247 {
248 v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
249 E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
250 E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
251 switch (v) {
252 case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
253 return 16384;
254 case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
255 return 8192;
256 case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
257 return 4096;
258 case E1000_RCTL_SZ_1024:
259 return 1024;
260 case E1000_RCTL_SZ_512:
261 return 512;
262 case E1000_RCTL_SZ_256:
263 return 256;
264 }
265 return 2048;
266 }
267
/*
 * Device reset: stop the autoneg timer, reload PHY/MAC register
 * defaults, clear transmit state, and seed the first receive-address
 * register pair (RAL0/RAH0) from the configured MAC address.
 */
static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    qemu_del_timer(d->autoneg_timer);
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    /* keep STATUS.LU consistent with the backend's link state */
    if (d->nic->nc.link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
}
294
295 static void
296 set_ctrl(E1000State *s, int index, uint32_t val)
297 {
298 /* RST is self clearing */
299 s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
300 }
301
302 static void
303 set_rx_control(E1000State *s, int index, uint32_t val)
304 {
305 s->mac_reg[RCTL] = val;
306 s->rxbuf_size = rxbufsize(val);
307 s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
308 DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
309 s->mac_reg[RCTL]);
310 qemu_flush_queued_packets(&s->nic->nc);
311 }
312
/*
 * MDIC write: the guest drives MDIO transactions to the (single)
 * PHY through this register.  Decode the opcode, perform the PHY
 * register access, and report completion (READY) or failure (ERROR).
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* clear the data field, then merge in the PHY value */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* run any side-effect hook (e.g. PHY_CTRL autoneg) first */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    /* raise the MDIO-access-complete interrupt when requested */
    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
346
/*
 * EECD read (bit-banged Microwire EEPROM).  Always reports EEPROM
 * present and access granted; DO carries the current output bit of
 * the eeprom word selected by set_eecd() (MSB first) and is held
 * high while no read is in progress.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
360
/*
 * EECD write: advance the Microwire bus state machine one guest
 * transition at a time.  A CS rising edge resets the shifter; each
 * SK rising edge latches a DI bit, and once 9 bits (start bit +
 * 2-bit opcode + 6-bit word address) are in, a READ opcode arms
 * get_eecd() to shift the selected word out.  SK falling edges
 * advance the output bit counter.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))     // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {    // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))    // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {    // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    /* rising clock edge: shift the DI bit into the input word */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* -1: the first DO bit appears after an SK falling edge */
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
394
/*
 * EERD read: the "fast" EEPROM read interface.  When a read was
 * started (START written), return the addressed word with DONE set;
 * addresses beyond the checksum word report DONE with no data.
 */
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
409
/*
 * Insert an Internet checksum over data[css..n) at offset sloc,
 * clamping n to the (inclusive) checksum-end offset cse when given.
 *
 * The offsets come straight from guest-controlled descriptors, so
 * bound-check them: the original `sloc < n-1` test wraps when n == 0
 * (n is unsigned), and an unchecked css >= n would make the length
 * n-css wrap to a huge value and overrun the buffer in
 * net_checksum_add().  Out-of-range requests are now ignored.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    /* need room for the 16-bit result at sloc and a non-wrapping
     * length for the summed region */
    if (n > 1 && sloc < n-1 && css < n) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}
423
424 static inline int
425 vlan_enabled(E1000State *s)
426 {
427 return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
428 }
429
430 static inline int
431 vlan_rx_filter_enabled(E1000State *s)
432 {
433 return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
434 }
435
436 static inline int
437 is_vlan_packet(E1000State *s, const uint8_t *buf)
438 {
439 return (be16_to_cpup((uint16_t *)(buf + 12)) ==
440 le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
441 }
442
443 static inline int
444 is_vlan_txd(uint32_t txd_lower)
445 {
446 return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
447 }
448
449 /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
450 * fill it in, just pad descriptor length by 4 bytes unless guest
451 * told us to strip it off the packet. */
452 static inline int
453 fcs_len(E1000State *s)
454 {
455 return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
456 }
457
458 static void
459 e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
460 {
461 if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
462 s->nic->nc.info->receive(&s->nic->nc, buf, size);
463 } else {
464 qemu_send_packet(&s->nic->nc, buf, size);
465 }
466 }
467
/*
 * Emit one segment (or one whole frame when TSO is not in effect):
 * patch IP/TCP/UDP headers for the current segment, insert the
 * requested checksums, optionally splice in the VLAN tag, hand the
 * frame to the backend, and update the TX statistics counters.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {        // IPv4
            /* total length, and a distinct IP id per segment */
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else        // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* advance the sequence number by payload already sent */
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),    // seq
                          be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            /* only the final segment keeps PSH/FIN */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;        // PSH, FIN
        } else    // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            cpu_to_be16wu(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* shift the MAC addresses into vlan[]+data[0..7] and insert
         * the 802.1Q tag after them (vlan[] precedes data[]) */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    /* 64-bit octet counter: carry into TOTH when TOTL wraps */
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
526
/*
 * Consume one transmit descriptor.  A context descriptor latches the
 * TSO/checksum-offload parameters into s->tx.  Data and legacy
 * descriptors DMA the guest buffer into the accumulation buffer,
 * segmenting at header+MSS boundaries when TSO is active, and flush
 * through xmit_seg() at end-of-packet.
 */
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {    // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {    // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            /* POPTS is only honored on the packet's first descriptor */
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    /* latch the VLAN tag to insert at send time */
    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        /* TSO: fill data[] up to header+MSS, emit a segment, then
         * replay the saved header and continue with the remainder */
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes);
            /* snapshot the header once it is fully present */
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    /* a TSO tail shorter than the header is silently dropped */
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
618
/*
 * Write back descriptor status (DD set, error bits cleared) when the
 * descriptor requested a report (RS/RPS).  Returns the TXDW interrupt
 * cause to accumulate, or 0 when no writeback was requested.
 */
static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    /* only the status dword is written back, at its in-ring offset */
    pci_dma_write(&s->dev, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
633
634 static uint64_t tx_desc_base(E1000State *s)
635 {
636 uint64_t bah = s->mac_reg[TDBAH];
637 uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
638
639 return (bah << 32) + bal;
640 }
641
/*
 * Drain the transmit ring: process descriptors from TDH up to TDT,
 * writing back completion status as requested, then raise the
 * accumulated interrupt causes (always at least TXQE).
 */
static void
start_xmit(E1000State *s)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* advance the head, wrapping at the end of the ring */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
681
/*
 * Decide whether to accept an incoming frame: VLAN filter first,
 * then promiscuous modes, broadcast, exact unicast match against the
 * 16 RAL/RAH pairs, and finally the 4096-bit multicast table hash.
 * Returns nonzero to accept the frame.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* drop tagged frames whose VID bit is clear in VFTA */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)        // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))    // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* exact unicast match against each valid RAL/RAH pair */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* MO field selects which 12 address bits index the hash table */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
734
735 static void
736 e1000_set_link_status(NetClientState *nc)
737 {
738 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
739 uint32_t old_status = s->mac_reg[STATUS];
740
741 if (nc->link_down) {
742 e1000_link_down(s);
743 } else {
744 e1000_link_up(s);
745 }
746
747 if (s->mac_reg[STATUS] != old_status)
748 set_ics(s, 0, E1000_ICR_LSC);
749 }
750
/*
 * Do the posted RX descriptors provide enough buffer space for a
 * packet of total_size bytes?  RDH == RDT means the ring is empty,
 * so that case only succeeds via the short-packet fast path.
 */
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        /* tail has wrapped: free space spans the end of the ring */
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
            s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}
768
769 static int
770 e1000_can_receive(NetClientState *nc)
771 {
772 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
773
774 return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
775 }
776
777 static uint64_t rx_desc_base(E1000State *s)
778 {
779 uint64_t bah = s->mac_reg[RDBAH];
780 uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
781
782 return (bah << 32) + bal;
783 }
784
/*
 * Receive path: pad runt frames, apply the LPE/SBP size policy and
 * the address filter, strip the VLAN tag when enabled, then DMA the
 * frame into as many RX descriptors as needed.  Raises RXT0 (plus
 * RXDMT0 when the ring runs low, or RXO on overrun).  Returns the
 * number of bytes consumed, or -1 to queue the packet for retry.
 */
static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        memcpy(min_buf, buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        buf = min_buf;
        size = sizeof(min_buf);
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        /* save the tag for the descriptor and strip it in place
         * NOTE(review): this writes through the const buf pointer
         * (and may scribble on the caller's buffer) — confirm */
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((uint8_t *)buf + 4, buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr),
                              buf + desc_offset + vlan_offset, copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(&s->dev, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    /* raise RXDMT0 when free descriptors drop below the threshold */
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
901
902 static uint32_t
903 mac_readreg(E1000State *s, int index)
904 {
905 return s->mac_reg[index];
906 }
907
908 static uint32_t
909 mac_icr_read(E1000State *s, int index)
910 {
911 uint32_t ret = s->mac_reg[ICR];
912
913 DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
914 set_interrupt_cause(s, 0, 0);
915 return ret;
916 }
917
918 static uint32_t
919 mac_read_clr4(E1000State *s, int index)
920 {
921 uint32_t ret = s->mac_reg[index];
922
923 s->mac_reg[index] = 0;
924 return ret;
925 }
926
927 static uint32_t
928 mac_read_clr8(E1000State *s, int index)
929 {
930 uint32_t ret = s->mac_reg[index];
931
932 s->mac_reg[index] = 0;
933 s->mac_reg[index-1] = 0;
934 return ret;
935 }
936
937 static void
938 mac_writereg(E1000State *s, int index, uint32_t val)
939 {
940 s->mac_reg[index] = val;
941 }
942
943 static void
944 set_rdt(E1000State *s, int index, uint32_t val)
945 {
946 s->mac_reg[index] = val & 0xffff;
947 if (e1000_has_rxbufs(s, 1)) {
948 qemu_flush_queued_packets(&s->nic->nc);
949 }
950 }
951
952 static void
953 set_16bit(E1000State *s, int index, uint32_t val)
954 {
955 s->mac_reg[index] = val & 0xffff;
956 }
957
958 static void
959 set_dlen(E1000State *s, int index, uint32_t val)
960 {
961 s->mac_reg[index] = val & 0xfff80;
962 }
963
964 static void
965 set_tctl(E1000State *s, int index, uint32_t val)
966 {
967 s->mac_reg[index] = val;
968 s->mac_reg[TDT] &= 0xffff;
969 start_xmit(s);
970 }
971
972 static void
973 set_icr(E1000State *s, int index, uint32_t val)
974 {
975 DBGOUT(INTERRUPT, "set_icr %x\n", val);
976 set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
977 }
978
979 static void
980 set_imc(E1000State *s, int index, uint32_t val)
981 {
982 s->mac_reg[IMS] &= ~val;
983 set_ics(s, 0, 0);
984 }
985
986 static void
987 set_ims(E1000State *s, int index, uint32_t val)
988 {
989 s->mac_reg[IMS] |= val;
990 set_ics(s, 0, 0);
991 }
992
/* MMIO read dispatch table, indexed by register word offset.  Plain
 * registers use mac_readreg; statistics counters are read-to-clear;
 * ICR/EECD/EERD have dedicated handlers. */
#define getreg(x) [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
    getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
    getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
    getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
    getreg(TDLEN), getreg(RDLEN),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
1012
/* MMIO write dispatch table, indexed by register word offset.  Plain
 * registers use mac_writereg; registers with side effects (interrupt
 * causes, ring pointers, EEPROM, MDIO, RCTL/CTRL) get dedicated
 * handlers. */
#define putreg(x) [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
    [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
    [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
    [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
    [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
1029
1030 static void
1031 e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
1032 unsigned size)
1033 {
1034 E1000State *s = opaque;
1035 unsigned int index = (addr & 0x1ffff) >> 2;
1036
1037 if (index < NWRITEOPS && macreg_writeops[index]) {
1038 macreg_writeops[index](s, index, val);
1039 } else if (index < NREADOPS && macreg_readops[index]) {
1040 DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
1041 } else {
1042 DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
1043 index<<2, val);
1044 }
1045 }
1046
1047 static uint64_t
1048 e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
1049 {
1050 E1000State *s = opaque;
1051 unsigned int index = (addr & 0x1ffff) >> 2;
1052
1053 if (index < NREADOPS && macreg_readops[index])
1054 {
1055 return macreg_readops[index](s, index);
1056 }
1057 DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
1058 return 0;
1059 }
1060
/* MMIO window callbacks.  .impl restricts the device implementation to
 * aligned 32-bit accesses; the memory core splits or merges other guest
 * access sizes accordingly. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1070
1071 static uint64_t e1000_io_read(void *opaque, hwaddr addr,
1072 unsigned size)
1073 {
1074 E1000State *s = opaque;
1075
1076 (void)s;
1077 return 0;
1078 }
1079
1080 static void e1000_io_write(void *opaque, hwaddr addr,
1081 uint64_t val, unsigned size)
1082 {
1083 E1000State *s = opaque;
1084
1085 (void)s;
1086 }
1087
/* I/O BAR callbacks; both handlers are stubs (window unimplemented). */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
1093
/* VMState field-exists test: true only for version-1 migration streams
 * (used to skip fields that were dropped in version 2). */
static bool is_version_1(void *opaque, int version_id)
{
    if (version_id == 1) {
        return true;
    }
    return false;
}
1098
1099 static int e1000_post_load(void *opaque, int version_id)
1100 {
1101 E1000State *s = opaque;
1102
1103 /* nc.link_down can't be migrated, so infer link_down according
1104 * to link status bit in mac_reg[STATUS] */
1105 s->nic->nc.link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;
1106
1107 return 0;
1108 }
1109
/*
 * Migration state description for the e1000.  Version 2 is current;
 * version-1 streams additionally carried an instance id and mmio base,
 * consumed here as UNUSED padding.  NOTE: field order defines the
 * migration wire format and must not be changed.
 */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = e1000_post_load,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        /* Receive buffer geometry. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        /* EEPROM bit-bang (EECD) interface state. */
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* In-flight transmit context (checksum offload and TSO). */
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        /* EEPROM contents and PHY registers. */
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* Individually migrated MAC registers. */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        /* Receive-address array, multicast and VLAN filter tables. */
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};
1188
/*
 * Default EEPROM image (64 16-bit words).  pci_e1000_init() copies this
 * into eeprom_data, then overwrites words 0-2 with the configured MAC
 * address and recomputes word EEPROM_CHECKSUM_REG so that all words sum
 * to EEPROM_SUM.
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff, 0xffff, 0x0000,
};
1199
1200 /* PCI interface */
1201
1202 static void
1203 e1000_mmio_setup(E1000State *d)
1204 {
1205 int i;
1206 const uint32_t excluded_regs[] = {
1207 E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
1208 E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
1209 };
1210
1211 memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
1212 PNPMMIO_SIZE);
1213 memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
1214 for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
1215 memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
1216 excluded_regs[i+1] - excluded_regs[i] - 4);
1217 memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
1218 }
1219
1220 static void
1221 e1000_cleanup(NetClientState *nc)
1222 {
1223 E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
1224
1225 s->nic = NULL;
1226 }
1227
1228 static void
1229 pci_e1000_uninit(PCIDevice *dev)
1230 {
1231 E1000State *d = DO_UPCAST(E1000State, dev, dev);
1232
1233 qemu_del_timer(d->autoneg_timer);
1234 qemu_free_timer(d->autoneg_timer);
1235 memory_region_destroy(&d->mmio);
1236 memory_region_destroy(&d->io);
1237 qemu_del_net_client(&d->nic->nc);
1238 }
1239
/* Callbacks connecting the emulated NIC to the QEMU net core. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
1248
1249 static int pci_e1000_init(PCIDevice *pci_dev)
1250 {
1251 E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
1252 uint8_t *pci_conf;
1253 uint16_t checksum = 0;
1254 int i;
1255 uint8_t *macaddr;
1256
1257 pci_conf = d->dev.config;
1258
1259 /* TODO: RST# value should be 0, PCI spec 6.2.4 */
1260 pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
1261
1262 pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
1263
1264 e1000_mmio_setup(d);
1265
1266 pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
1267
1268 pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
1269
1270 memmove(d->eeprom_data, e1000_eeprom_template,
1271 sizeof e1000_eeprom_template);
1272 qemu_macaddr_default_if_unset(&d->conf.macaddr);
1273 macaddr = d->conf.macaddr.a;
1274 for (i = 0; i < 3; i++)
1275 d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
1276 for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
1277 checksum += d->eeprom_data[i];
1278 checksum = (uint16_t) EEPROM_SUM - checksum;
1279 d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
1280
1281 d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
1282 object_get_typename(OBJECT(d)), d->dev.qdev.id, d);
1283
1284 qemu_format_nic_info_str(&d->nic->nc, macaddr);
1285
1286 add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");
1287
1288 d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);
1289
1290 return 0;
1291 }
1292
1293 static void qdev_e1000_reset(DeviceState *dev)
1294 {
1295 E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
1296 e1000_reset(d);
1297 }
1298
/* User-configurable qdev properties (the standard NIC set supplied by
 * DEFINE_NIC_PROPERTIES for the embedded NICConf). */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_END_OF_LIST(),
};
1303
1304 static void e1000_class_init(ObjectClass *klass, void *data)
1305 {
1306 DeviceClass *dc = DEVICE_CLASS(klass);
1307 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1308
1309 k->init = pci_e1000_init;
1310 k->exit = pci_e1000_uninit;
1311 k->romfile = "pxe-e1000.rom";
1312 k->vendor_id = PCI_VENDOR_ID_INTEL;
1313 k->device_id = E1000_DEVID;
1314 k->revision = 0x03;
1315 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1316 dc->desc = "Intel Gigabit Ethernet";
1317 dc->reset = qdev_e1000_reset;
1318 dc->vmsd = &vmstate_e1000;
1319 dc->props = e1000_properties;
1320 }
1321
1322 static TypeInfo e1000_info = {
1323 .name = "e1000",
1324 .parent = TYPE_PCI_DEVICE,
1325 .instance_size = sizeof(E1000State),
1326 .class_init = e1000_class_init,
1327 };
1328
/* Register the e1000 QOM type with the type system. */
static void e1000_register_types(void)
{
    type_register_static(&e1000_info);
}

/* Run e1000_register_types() at module-initialization time. */
type_init(e1000_register_types)