/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */


#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net/checksum.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "qemu/iov.h"
#include "qemu/range.h"

#include "e1000_regs.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x)    (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

#define MAXIMUM_ETHERNET_HDR_LEN (14+4)

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
 *  Others never tested
 */

typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    /* shifted in from guest driver */
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_MAC_BIT 2
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
#define E1000_FLAG_MAC (1 << E1000_FLAG_MAC_BIT)
    uint32_t compat_flags;
} E1000State;

#define chkflag(x)     (s->compat_flags & E1000_FLAG_##x)

typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
     OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)

#define defreg(x)    x = (E1000_##x>>2)
enum {
    defreg(CTRL),    defreg(EECD),    defreg(EERD),    defreg(GPRC),
    defreg(GPTC),    defreg(ICR),     defreg(ICS),     defreg(IMC),
    defreg(IMS),     defreg(LEDCTL),  defreg(MANC),    defreg(MDIC),
    defreg(MPC),     defreg(PBA),     defreg(RCTL),    defreg(RDBAH),
    defreg(RDBAL),   defreg(RDH),     defreg(RDLEN),   defreg(RDT),
    defreg(STATUS),  defreg(SWSM),    defreg(TCTL),    defreg(TDBAH),
    defreg(TDBAL),   defreg(TDH),     defreg(TDLEN),   defreg(TDT),
    defreg(TORH),    defreg(TORL),    defreg(TOTH),    defreg(TOTL),
    defreg(TPR),     defreg(TPT),     defreg(TXDCTL),  defreg(WUFC),
    defreg(RA),      defreg(MTA),     defreg(CRCERRS), defreg(VFTA),
    defreg(VET),     defreg(RDTR),    defreg(RADV),    defreg(TADV),
    defreg(ITR),
};

static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
    s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static bool
have_autoneg(E1000State *s)
{
    return chkflag(AUTONEG) && (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
}

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
    s->phy_reg[PHY_CTRL] = val & ~(0x3f |
                                   MII_CR_RESET |
                                   MII_CR_RESTART_AUTO_NEG);

    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000_link_down(s);
        DBGOUT(PHY, "Start link auto negotiation\n");
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS]      = PHY_R,   [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1]         = PHY_R,   [M88E1000_PHY_SPEC_CTRL]     = PHY_RW,
    [PHY_CTRL]        = PHY_RW,  [PHY_1000T_CTRL]             = PHY_RW,
    [PHY_LP_ABILITY]  = PHY_R,   [PHY_1000T_STATUS]           = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW,  [M88E1000_RX_ERR_CNTR]       = PHY_R,
    [PHY_ID2]         = PHY_R,   [M88E1000_PHY_SPEC_STATUS]   = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};

/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, p. 250 */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL]   = MII_CR_SPEED_SELECT_MSB |
                   MII_CR_FULL_DUPLEX |
                   MII_CR_AUTO_NEG_EN,

    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                   MII_SR_LINK_STATUS |   /* link initially up */
                   MII_SR_AUTONEG_CAPS |
                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
                   MII_SR_PREAMBLE_SUPPRESS |
                   MII_SR_EXTENDED_STATUS |
                   MII_SR_10T_HD_CAPS |
                   MII_SR_10T_FD_CAPS |
                   MII_SR_100X_HD_CAPS |
                   MII_SR_100X_FD_CAPS,

    [PHY_ID1] = 0x141,
    /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,
    [PHY_1000T_CTRL] = 0x0e00,
    [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};

static const uint32_t mac_reg_init[] = {
    [PBA]     = 0x00100030,
    [LEDCTL]  = 0x602,
    [CTRL]    = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS]  = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC]    = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};

/* Helper function, *curr == 0 means the value is not set */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value && (*curr == 0 || value < *curr)) {
        *curr = value;
    }
}

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential rising edge.  We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR).  RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (chkflag(MIT)) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}
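
/*
 * Worked example (illustrative numbers, not from the datasheet): with
 * ITR = 4000 and no TADV/RADV contribution, mit_delay ends up as 4000,
 * so the timer fires 4000 * 256 ns = ~1.02 ms after the first pending
 * interrupt; rising edges occurring inside that window are coalesced
 * into a single pci_set_irq() transition when e1000_mit_timer() runs.
 */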

static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
        s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_link_up(s);
        s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Auto negotiation is completed\n");
        set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
    }
}

static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}
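
/*
 * Sketch of the encoding this implements: with RCTL.BSEX set, the
 * two-bit BSIZE field is scaled by 16 (1024 -> 16384, 512 -> 8192,
 * 256 -> 4096); any combination not matched above, including the
 * plain SZ_2048 encoding, falls back to the 2048-byte default.
 */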

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
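
/*
 * Usage sketch (assumed driver behaviour, not mandated by this file): to
 * read PHY_STATUS a guest writes E1000_MDIC_OP_READ with PHY address 1
 * and PHY_STATUS in the REG field, then polls MDIC until E1000_MDIC_READY
 * is set, taking the result from the low 16 bits.  Only PHY address 1 is
 * decoded here; any other address just reflects E1000_MDIC_ERROR.
 */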

static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val)) {            /* CS inactive; nothing to do */
        return;
    }
    if (E1000_EECD_CS & (val ^ oldval)) {    /* CS rise edge; reset state */
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval))) { /* no clock edge */
        return;
    }
    if (!(E1000_EECD_SK & val)) {            /* falling edge */
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
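
/*
 * Protocol sketch of the bit-banged Microwire read implemented above:
 * the guest raises CS, clocks in nine bits on rising SK edges -- a
 * 3-bit opcode (EEPROM_READ_OPCODE_MICROWIRE) followed by a 6-bit word
 * address -- and then get_eecd() shifts the addressed 16-bit word out
 * on DO, most significant bit first, advancing one bit per falling SK
 * edge.
 */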

static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
           E1000_EEPROM_RW_REG_DONE | r);
}
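
/*
 * EERD usage sketch (driver side, assumed): the guest writes the word
 * address shifted by E1000_EEPROM_RW_ADDR_SHIFT together with the START
 * bit, then polls EERD for E1000_EEPROM_RW_REG_DONE and extracts the
 * 16-bit word from the data field.  In this model the read completes
 * synchronously, so DONE and the data are already present on the next
 * read.
 */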

static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}
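
/*
 * Arithmetic sketch: net_checksum_add() sums the bytes in [css, n) as
 * big-endian 16-bit words, net_checksum_finish() folds the carries and
 * takes the one's complement, and the result is stored big-endian at
 * offset sloc.  For instance, summing the two words 0x0001 and 0xf203
 * gives 0xf204, so its complement 0x0dfb is what lands in the packet.
 */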

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpu(s->mac_reg[VET]));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32.  We don't get it from backends and can't
 * fill it in, so we just pad the descriptor length by 4 bytes unless
 * the guest told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {                                /* IPv4 */
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else {                                     /* IPv6 */
            stw_be_p(tp->data+css+4, tp->size - css);
        }
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;            /* PSH, FIN */
        } else                                       /* UDP */
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else {
        e1000_send_packet(s, tp->data, tp->size);
    }

    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
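
/*
 * Segmentation example (illustrative numbers): with hdr_len = 54,
 * mss = 1460 and paylen = 4000, process_tx_desc() calls xmit_seg()
 * three times; the first two segments carry 1460 payload bytes each
 * with PSH/FIN masked off, the last carries the remaining 1080 bytes
 * with the original flags, and each one gets its TCP sequence number
 * advanced by frames * mss.
 */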

static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    /* context descriptor */
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {    /* this is probably wrong */
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * The following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN; there's nothing too intelligent
         * we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
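
/*
 * Ring-walk example (illustrative numbers): with TDLEN = 1024 (64
 * descriptors of 16 bytes), TDH = 62 and TDT = 2, the loop processes
 * descriptors 62 and 63, wraps TDH to 0, then processes 0 and 1 before
 * TDH == TDT stops it; the tdh_start check only fires if the guest
 * programmed TDT/TDLEN so that the walk can never reach TDT.
 */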

static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: "
           "%02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
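
/*
 * Hash example (illustrative): with RCTL.MO = 0 the inexact filter takes
 * bits [47:36] of the destination address, i.e.
 * (((buf[5] << 8) | buf[4]) >> 4) & 0xfff.  For 01:00:5e:00:00:01 that
 * evaluates to 0x010, so the packet is gated by bit 16 of MTA[0].
 */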

static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        if (have_autoneg(s) &&
            !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            /* emulate auto-negotiation if supported */
            timer_mod(s->autoneg_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
               s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}
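
/*
 * Accounting sketch (illustrative numbers): with RDLEN = 4096 (256
 * descriptors), RDH = 10 and RDT = 250 there are 240 free descriptors,
 * so a packet fits as long as total_size <= 240 * rxbuf_size; when
 * RDH == RDT the ring is treated as empty and the packet is refused.
 */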

static int
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
        (s->mac_reg[RCTL] & E1000_RCTL_EN) &&
        (s->parent_obj.config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
        e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                             + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000_receive_iov(nc, &iov, 1);
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}
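
/*
 * Note on the read-to-clear pairs (a sketch of the intended usage): the
 * 64-bit counters are wired so that reading the high half (e.g. TORH)
 * clears both halves, while the low half (TORL) reads non-destructively,
 * so a driver would read the low word first to take a consistent
 * snapshot.
 */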

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    uint32_t macaddr[2];

    s->mac_reg[index] = val;

    if (index == RA + 1) {
        macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
        macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
        qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
    }
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x)    [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),      getreg(RCTL),     getreg(TDH),      getreg(TXDCTL),
    getreg(WUFC),     getreg(TDT),      getreg(CTRL),     getreg(LEDCTL),
    getreg(MANC),     getreg(MDIC),     getreg(SWSM),     getreg(STATUS),
    getreg(TORL),     getreg(TOTL),     getreg(IMS),      getreg(TCTL),
    getreg(RDH),      getreg(RDT),      getreg(VET),      getreg(ICS),
    getreg(TDBAL),    getreg(TDBAH),    getreg(RDBAH),    getreg(RDBAL),
    getreg(TDLEN),    getreg(RDLEN),    getreg(RDTR),     getreg(RADV),
    getreg(TADV),     getreg(ITR),

    [TOTH]    = mac_read_clr8,      [TORH]    = mac_read_clr8,
    [GPRC]    = mac_read_clr4,      [GPTC]    = mac_read_clr4,
    [TPT]     = mac_read_clr4,      [TPR]     = mac_read_clr4,
    [ICR]     = mac_icr_read,       [EECD]    = get_eecd,
    [EERD]    = flash_eerd_read,

    [CRCERRS ... MPC]   = &mac_readreg,
    [RA ... RA+31]      = &mac_readreg,
    [MTA ... MTA+127]   = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)    [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),      putreg(EERD),     putreg(SWSM),     putreg(WUFC),
    putreg(TDBAL),    putreg(TDBAH),    putreg(TXDCTL),   putreg(RDBAH),
    putreg(RDBAL),    putreg(LEDCTL),   putreg(VET),

    [TDLEN]  = set_dlen,   [RDLEN]  = set_dlen,       [TCTL] = set_tctl,
    [TDT]    = set_tctl,   [MDIC]   = set_mdic,       [ICS]  = set_ics,
    [TDH]    = set_16bit,  [RDH]    = set_16bit,      [RDT]  = set_rdt,
    [IMC]    = set_imc,    [IMS]    = set_ims,        [ICR]  = set_icr,
    [EECD]   = set_eecd,   [RCTL]   = set_rx_control, [CTRL] = set_ctrl,
    [RDTR]   = set_16bit,  [RADV]   = set_16bit,      [TADV] = set_16bit,
    [ITR]    = set_16bit,

    [RA ... RA+31]      = &mac_writereg,
    [MTA ... MTA+127]   = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

enum { MAC_ACCESS_PARTIAL = 1, MAC_ACCESS_FLAG_NEEDED = 2 };

#define markflag(x)    ((E1000_FLAG_##x << 2) | MAC_ACCESS_FLAG_NEEDED)
/* In the array below the meaning of the bits is: [f|f|f|f|f|f|n|p]
 * f - flag bits (up to 6 possible flags)
 * n - flag needed
 * p - partially implemented */
static const uint8_t mac_reg_access[0x8000] = {
    [RDTR]    = markflag(MIT),    [TADV]    = markflag(MIT),
    [RADV]    = markflag(MIT),    [ITR]     = markflag(MIT),
};
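
/*
 * Encoding example: markflag(MIT) expands to
 * (E1000_FLAG_MIT << 2) | MAC_ACCESS_FLAG_NEEDED, so the MMIO handlers
 * below dispatch accesses to RDTR/RADV/TADV/ITR only when the
 * "mitigation" compat flag is set in s->compat_flags, and merely log
 * the attempt otherwise.
 */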

static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Writing to register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            macreg_writeops[index](s, index, val);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO write attempt to disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n",
               index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index]) {
        if (!(mac_reg_access[index] & MAC_ACCESS_FLAG_NEEDED)
            || (s->compat_flags & (mac_reg_access[index] >> 2))) {
            if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
                DBGOUT(GENERAL, "Reading register at offset: 0x%08x. "
                       "It is not fully implemented.\n", index<<2);
            }
            return macreg_readops[index](s, index);
        } else {    /* "flag needed" bit is set, but the flag is not active */
            DBGOUT(MMIO, "MMIO read attempt of disabled reg. addr=0x%08x\n",
                   index<<2);
        }
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    }
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately.  This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down && have_autoneg(s)) {
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}

static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    if (!chkflag(MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (have_autoneg(s) &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}

static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MIT);
}

static bool e1000_full_mac_needed(void *opaque)
{
    E1000State *s = opaque;

    return chkflag(MAC);
}

static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000_full_mac_state = {
    .name = "e1000/full_mac_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_full_mac_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        &vmstate_e1000_full_mac_state,
        NULL
    }
};

/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_init().
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};

/* PCI interface */

static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] -
                                     excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io",
                          IOPORT_SIZE);
}
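
/*
 * Design note (background, not stated in this file): coalesced MMIO lets
 * QEMU batch guest register writes and replay them later, which is only
 * safe for registers without immediate side effects; excluded_regs
 * therefore punches holes around MDIC, the interrupt registers and the
 * TCTL/TDT doorbells so that writes to those trap synchronously.
 */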

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    qemu_del_nic(d->nic);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};

static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
                               uint32_t val, int len)
{
    E1000State *s = E1000(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}


static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_dev->config_write = e1000_write_config;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
        sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = E1000(dev);
    e1000_reset(d);
}

static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

typedef struct E1000Info {
    const char *name;
    uint16_t   device_id;
    uint8_t    revision;
    uint16_t   phy_id2;
} E1000Info;

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
    const E1000Info *info = data;

    k->realize = pci_e1000_realize;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}

static void e1000_instance_init(Object *obj)
{
    E1000State *n = E1000(obj);
    device_add_bootindex_property(obj, &n->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};

static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};

static void e1000_register_types(void)
{
    int i;

    type_register_static(&e1000_base_info);
    for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
        const E1000Info *info = &e1000_devices[i];
        TypeInfo type_info = {};

        type_info.name = info->name;
        type_info.parent = TYPE_E1000_BASE;
        type_info.class_data = (void *)info;
        type_info.class_init = e1000_class_init;
        type_info.instance_init = e1000_instance_init;

        type_register(&type_info);
    }
}

type_init(e1000_register_types)