e1000: Avoid infinite loop in processing transmit descriptor (CVE-2015-6815)
hw/net/e1000.c
/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */


#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "net/net.h"
#include "net/checksum.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "sysemu/dma.h"
#include "qemu/iov.h"
#include "qemu/range.h"

#include "e1000_regs.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000
#define MIN_BUF_SIZE      60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* this is the size past which hardware will drop packets when setting LPE=1 */
#define MAXIMUM_ETHERNET_LPE_SIZE 16384

#define MAXIMUM_ETHERNET_HDR_LEN (14+4)

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows, Linux, and OS X <= 10.8
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  E1000_DEV_ID_82545EM_COPPER works with Linux and OS X >= 10.6
 *  Others never tested
 */

typedef struct E1000State_st {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;

    QEMUTimer *autoneg_timer;

    QEMUTimer *mit_timer;      /* Mitigation timer. */
    bool mit_timer_on;         /* Mitigation timer is running. */
    bool mit_irq_level;        /* Tracks interrupt pin level. */
    uint32_t mit_ide;          /* Tracks E1000_TXD_CMD_IDE bit. */

/* Compatibility flags for migration to/from qemu 1.3.0 and older */
#define E1000_FLAG_AUTONEG_BIT 0
#define E1000_FLAG_MIT_BIT 1
#define E1000_FLAG_AUTONEG (1 << E1000_FLAG_AUTONEG_BIT)
#define E1000_FLAG_MIT (1 << E1000_FLAG_MIT_BIT)
    uint32_t compat_flags;
} E1000State;

typedef struct E1000BaseClass {
    PCIDeviceClass parent_class;
    uint16_t phy_id2;
} E1000BaseClass;

#define TYPE_E1000_BASE "e1000-base"

#define E1000(obj) \
    OBJECT_CHECK(E1000State, (obj), TYPE_E1000_BASE)

#define E1000_DEVICE_CLASS(klass) \
    OBJECT_CLASS_CHECK(E1000BaseClass, (klass), TYPE_E1000_BASE)
#define E1000_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(E1000BaseClass, (obj), TYPE_E1000_BASE)

#define defreg(x) x = (E1000_##x>>2)
enum {
    defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
    defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
    defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
    defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
    defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
    defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
    defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
    defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
    defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
    defreg(RA), defreg(MTA), defreg(CRCERRS), defreg(VFTA),
    defreg(VET), defreg(RDTR), defreg(RADV), defreg(TADV),
    defreg(ITR),
};

static void
e1000_link_down(E1000State *s)
{
    s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
    s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
    s->phy_reg[PHY_LP_ABILITY] &= ~MII_LPAR_LPACK;
}

static void
e1000_link_up(E1000State *s)
{
    s->mac_reg[STATUS] |= E1000_STATUS_LU;
    s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;

    /* E1000_STATUS_LU is tested by e1000_can_receive() */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

static bool
have_autoneg(E1000State *s)
{
    return (s->compat_flags & E1000_FLAG_AUTONEG) &&
           (s->phy_reg[PHY_CTRL] & MII_CR_AUTO_NEG_EN);
}

static void
set_phy_ctrl(E1000State *s, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
    s->phy_reg[PHY_CTRL] = val & ~(0x3f |
                                   MII_CR_RESET |
                                   MII_CR_RESTART_AUTO_NEG);

    /*
     * QEMU 1.3 does not support link auto-negotiation emulation, so if we
     * migrate during auto negotiation, after migration the link will be
     * down.
     */
    if (have_autoneg(s) && (val & MII_CR_RESTART_AUTO_NEG)) {
        e1000_link_down(s);
        DBGOUT(PHY, "Start link auto negotiation\n");
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};

enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R, [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R, [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW, [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R, [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R, [M88E1000_PHY_SPEC_STATUS] = PHY_R,
    [PHY_AUTONEG_EXP] = PHY_R,
};

/* PHY_ID2 documented in 8254x_GBe_SDM.pdf, pp. 250 */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL]   = MII_CR_SPEED_SELECT_MSB |
                   MII_CR_FULL_DUPLEX |
                   MII_CR_AUTO_NEG_EN,

    [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                   MII_SR_LINK_STATUS |   /* link initially up */
                   MII_SR_AUTONEG_CAPS |
                   /* MII_SR_AUTONEG_COMPLETE: initially NOT completed */
                   MII_SR_PREAMBLE_SUPPRESS |
                   MII_SR_EXTENDED_STATUS |
                   MII_SR_10T_HD_CAPS |
                   MII_SR_10T_FD_CAPS |
                   MII_SR_100X_HD_CAPS |
                   MII_SR_100X_FD_CAPS,

    [PHY_ID1] = 0x141,
    /* [PHY_ID2] configured per DevId, from e1000_reset() */
    [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,
    [PHY_1000T_CTRL] = 0x0e00,
    [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,
};

static const uint32_t mac_reg_init[] = {
    [PBA]    = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL]   = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
               E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC]   = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
               E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
               E1000_MANC_RMCP_EN,
};

/* Helper function, *curr == 0 means the value is not set */
static inline void
mit_update_delay(uint32_t *curr, uint32_t value)
{
    if (value && (*curr == 0 || value < *curr)) {
        *curr = value;
    }
}

static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t pending_ints;
    uint32_t mit_delay;

    s->mac_reg[ICR] = val;

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only. However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    s->mac_reg[ICS] = val;

    pending_ints = (s->mac_reg[IMS] & s->mac_reg[ICR]);
    if (!s->mit_irq_level && pending_ints) {
        /*
         * Here we detect a potential rising edge. We postpone raising the
         * interrupt line if we are inside the mitigation delay window
         * (s->mit_timer_on == 1).
         * We provide a partial implementation of interrupt mitigation,
         * emulating only RADV, TADV and ITR (lower 16 bits, 1024ns units for
         * RADV and TADV, 256ns units for ITR). RDTR is only used to enable
         * RADV; relative timers based on TIDV and RDTR are not implemented.
         */
        if (s->mit_timer_on) {
            return;
        }
        if (s->compat_flags & E1000_FLAG_MIT) {
            /* Compute the next mitigation delay according to pending
             * interrupts and the current values of RADV (provided
             * RDTR!=0), TADV and ITR.
             * Then rearm the timer.
             */
            mit_delay = 0;
            if (s->mit_ide &&
                    (pending_ints & (E1000_ICR_TXQE | E1000_ICR_TXDW))) {
                mit_update_delay(&mit_delay, s->mac_reg[TADV] * 4);
            }
            if (s->mac_reg[RDTR] && (pending_ints & E1000_ICS_RXT0)) {
                mit_update_delay(&mit_delay, s->mac_reg[RADV] * 4);
            }
            mit_update_delay(&mit_delay, s->mac_reg[ITR]);

            if (mit_delay) {
                s->mit_timer_on = 1;
                timer_mod(s->mit_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                          mit_delay * 256);
            }
            s->mit_ide = 0;
        }
    }

    s->mit_irq_level = (pending_ints != 0);
    pci_set_irq(d, s->mit_irq_level);
}

static void
e1000_mit_timer(void *opaque)
{
    E1000State *s = opaque;

    s->mit_timer_on = 0;
    /* Call set_interrupt_cause to update the irq level (if necessary). */
    set_interrupt_cause(s, 0, s->mac_reg[ICR]);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}

static void
e1000_autoneg_timer(void *opaque)
{
    E1000State *s = opaque;
    if (!qemu_get_queue(s->nic)->link_down) {
        e1000_link_up(s);
        s->phy_reg[PHY_LP_ABILITY] |= MII_LPAR_LPACK;
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
        DBGOUT(PHY, "Auto negotiation is completed\n");
        set_ics(s, 0, E1000_ICS_LSC); /* signal link status change to guest */
    }
}

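/*
 * Decode the buffer-size bits of RCTL (BSEX plus the SZ field) into a
 * receive buffer size in bytes; any combination not listed below falls
 * through to the power-on default of 2048.
 */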
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;
    E1000BaseClass *edc = E1000_DEVICE_GET_CLASS(d);
    uint8_t *macaddr = d->conf.macaddr.a;
    int i;

    timer_del(d->autoneg_timer);
    timer_del(d->mit_timer);
    d->mit_timer_on = 0;
    d->mit_irq_level = 0;
    d->mit_ide = 0;
    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    d->phy_reg[PHY_ID2] = edc->phy_id2;
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    if (qemu_get_queue(d->nic)->link_down) {
        e1000_link_down(d);
    }

    /* Some guests expect pre-initialized RAH/RAL (AddrValid flag + MACaddr) */
    d->mac_reg[RA] = 0;
    d->mac_reg[RA + 1] = E1000_RAH_AV;
    for (i = 0; i < 4; i++) {
        d->mac_reg[RA] |= macaddr[i] << (8 * i);
        d->mac_reg[RA + 1] |= (i < 2) ? macaddr[i + 4] << (8 * i) : 0;
    }
    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}

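/*
 * MDIC is the MDI (management data interface) control register: the guest
 * driver reads and writes PHY registers through it. Only PHY address 1
 * exists in this model; accesses are checked against phy_regcap, writes
 * with a handler in phyreg_writeops (currently just PHY_CTRL) are routed
 * through that handler, and completion can raise the MDAC interrupt.
 */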
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            } else {
                s->phy_reg[addr] = data;
            }
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}

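/*
 * EECD emulates the Microwire-style serial EEPROM interface: the guest
 * bit-bangs chip select (CS), a clock (SK) and a data-in line (DI), and
 * samples data out (DO). set_eecd() shifts guest bits in on rising SK
 * edges; once 9 bits have arrived (start bit, 2-bit opcode, 6-bit word
 * address) and the top 3 bits match EEPROM_READ_OPCODE_MICROWIRE,
 * get_eecd() starts clocking the addressed eeprom_data word back out,
 * MSB first.
 */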
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))              // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {    // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))   // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {            // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

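/*
 * EERD offers the simpler "EEPROM read" register interface: the guest
 * writes a word address together with the START bit, then polls for DONE
 * and reads the 16-bit data field back. Addresses beyond
 * EEPROM_CHECKSUM_REG complete with DONE set but return no data.
 */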
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}

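/*
 * Compute an Internet (ones'-complement) checksum over data[css..cse]
 * (cse == 0 means "to the end of the packet") and store it big-endian at
 * offset sloc, as directed by the checksum fields of a transmit context
 * descriptor.
 */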
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        stw_be_p(data + sloc, net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpu(s->mac_reg[VET]));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32. We don't get it from backends and can't
 * fill it in, just pad descriptor length by 4 bytes unless guest
 * told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}

static void
e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
        nc->info->receive(nc, buf, size);
    } else {
        qemu_send_packet(nc, buf, size);
    }
}

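/*
 * Transmit one frame from tp->data. For TSO (tse && cptse) this is one
 * segment of a larger packet: the IP total length (or IPv6 payload length)
 * is rewritten for this segment, the IPv4 identification field is bumped
 * by the number of segments already sent, the TCP sequence number by the
 * payload bytes already sent, and PSH/FIN are cleared on every segment
 * but the last. Requested IP and TCP/UDP checksums are then inserted
 * before the frame is handed to e1000_send_packet().
 */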
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {    // IPv4
            stw_be_p(tp->data+css+2, tp->size - css);
            stw_be_p(tp->data+css+4,
                     be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else    // IPv6
            stw_be_p(tp->data+css+4, tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            stl_be_p(tp->data+css+4, ldl_be_p(tp->data+css+4)+sofar); /* seq */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;    // PSH, FIN
        } else    // UDP
            stw_be_p(tp->data+css+4, len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            stw_be_p(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}

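/*
 * Process one descriptor from the transmit ring. Context descriptors
 * (DEXT set, DTYP_D clear) only latch offload parameters into s->tx;
 * data descriptors (DEXT | DTYP_D) and legacy descriptors carry payload,
 * which is DMA'd into tp->data and flushed via xmit_seg() whenever a
 * full TSO segment (hdr_len + mss) or end of packet (EOP) is reached.
 *
 * The MIN() clamps against sizeof(tp->data) and the `bytes && split_size`
 * loop condition below are the fix for CVE-2015-6815 (see the commit
 * subject above): without them a guest could program descriptor values
 * for which `bytes` stays 0, so split_size never decreases and descriptor
 * processing loops forever.
 */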
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    s->mit_ide |= (txd_lower & E1000_TXD_CMD_IDE);
    if (dtype == E1000_TXD_CMD_DEXT) {    // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {    // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        stw_be_p(tp->vlan_header,
                 le16_to_cpu(s->mac_reg[VET]));
        stw_be_p(tp->vlan_header + 2,
                 le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        msh = tp->hdr_len + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;

            bytes = MIN(sizeof(tp->data) - tp->size, bytes);
            pci_dma_read(d, addr, tp->data + tp->size, bytes);
            sz = tp->size + bytes;
            if (sz >= tp->hdr_len && tp->size < tp->hdr_len) {
                memmove(tp->header, tp->data, tp->hdr_len);
            }
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, tp->hdr_len);
                tp->size = tp->hdr_len;
            }
            split_size -= bytes;
        } while (bytes && split_size);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        split_size = MIN(sizeof(tp->data) - tp->size, split_size);
        pci_dma_read(d, addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < tp->hdr_len)) {
        xmit_seg(s);
    }
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(d, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}

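/*
 * Walk the transmit ring from TDH to TDT, processing and writing back one
 * descriptor at a time. tdh_start remembers where the walk began so that
 * a full wrap of the ring (possible only with bogus guest-programmed
 * TDT/TDLEN values) aborts the loop instead of spinning forever.
 */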
static void
start_xmit(E1000State *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(d, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}

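/*
 * Decide whether an incoming frame is accepted, applying the filters in
 * hardware order: VLAN table (VFTA) when VLAN filtering is on, then
 * unicast/multicast promiscuous modes, broadcast, the 16 exact-match
 * receive address registers (RA), and finally the 4096-bit multicast
 * table array (MTA) hash lookup. Returns 1 to accept, 0 to drop.
 */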
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)    // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))    // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down) {
        e1000_link_down(s);
    } else {
        if (have_autoneg(s) &&
            !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            /* emulate auto-negotiation if supported */
            timer_mod(s->autoneg_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
        } else {
            e1000_link_up(s);
        }
    }

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

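/*
 * Check whether the receive ring holds enough free descriptors (of
 * s->rxbuf_size bytes each) for a packet of total_size bytes; an
 * RDH == RDT ring counts as having no free buffers at all.
 */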
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT];
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
               s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static int
e1000_can_receive(NetClientState *nc)
{
    E1000State *s = qemu_get_nic_opaque(nc);

    return (s->mac_reg[STATUS] & E1000_STATUS_LU) &&
           (s->mac_reg[RCTL] & E1000_RCTL_EN) &&
           (s->parent_obj.config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
           e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}

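/*
 * Receive path: pad runts to the 60-byte minimum, enforce the LPE/SBP
 * size limits, run receive_filter(), strip the VLAN tag into the
 * descriptor's special field when VLAN handling is enabled, then DMA the
 * frame into as many ring buffers as it needs, chaining descriptors and
 * setting EOP on the last one. Statistics and the RXT0/RXDMT0/RXO
 * interrupt causes are raised as appropriate.
 */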
static ssize_t
e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
    E1000State *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf = iov->iov_base;
    size_t size = iov_size(iov, iovcnt);
    size_t iov_ofs = 0;
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[STATUS] & E1000_STATUS_LU)) {
        return -1;
    }

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) {
        return -1;
    }

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, 0, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
    } else if (iov->iov_len < MAXIMUM_ETHERNET_HDR_LEN) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, 0, min_buf, MAXIMUM_ETHERNET_HDR_LEN);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if ((size > MAXIMUM_ETHERNET_LPE_SIZE ||
        (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)))
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    if (!receive_filter(s, filter_buf, size)) {
        return size;
    }

    if (vlan_enabled(s) && is_vlan_packet(s, filter_buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(filter_buf
                                                             + 14)));
        iov_ofs = 4;
        if (filter_buf == iov->iov_base) {
            memmove(filter_buf + 4, filter_buf, 12);
        } else {
            iov_from_buf(iov, iovcnt, 4, filter_buf, 12);
            while (iov->iov_len <= iov_ofs) {
                iov_ofs -= iov->iov_len;
                iov++;
            }
        }
        vlan_status = E1000_RXD_STAT_VP;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(d, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t iov_copy;
                hwaddr ba = le64_to_cpu(desc.buffer_addr);
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                do {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);
                    pci_dma_write(d, ba, iov->iov_base + iov_ofs, iov_copy);
                    copy_size -= iov_copy;
                    ba += iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                } while (copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(d, base, &desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000_receive_iov(nc, &iov, 1);
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

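/*
 * Statistics registers are read-to-clear. mac_read_clr4 clears a single
 * 32-bit counter; mac_read_clr8 is used for the high half of a 64-bit
 * counter pair (e.g. TORH/TORL) and clears both halves, since index - 1
 * holds the corresponding low dword.
 */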
static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    uint32_t macaddr[2];

    s->mac_reg[index] = val;

    if (index == RA + 1) {
        macaddr[0] = cpu_to_le32(s->mac_reg[RA]);
        macaddr[1] = cpu_to_le32(s->mac_reg[RA + 1]);
        qemu_format_nic_info_str(qemu_get_queue(s->nic), (uint8_t *)macaddr);
    }
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
    if (e1000_has_rxbufs(s, 1)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x) [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
    getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
    getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
    getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
    getreg(TDLEN), getreg(RDLEN), getreg(RDTR), getreg(RADV),
    getreg(TADV), getreg(ITR),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x) [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
    [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
    [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
    [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
    [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RDTR] = set_16bit, [RADV] = set_16bit, [TADV] = set_16bit,
    [ITR] = set_16bit,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};

enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
e1000_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                 unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
               index<<2, val);
    }
}

static uint64_t
e1000_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index])
    {
        return macreg_readops[index](s, index);
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t e1000_io_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    E1000State *s = opaque;

    (void)s;
    return 0;
}

static void e1000_io_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    E1000State *s = opaque;

    (void)s;
}

static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static void e1000_pre_save(void *opaque)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    /* If the mitigation timer is active, emulate a timeout now. */
    if (s->mit_timer_on) {
        e1000_mit_timer(s);
    }

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down && have_autoneg(s)) {
        s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
    }
}

static int e1000_post_load(void *opaque, int version_id)
{
    E1000State *s = opaque;
    NetClientState *nc = qemu_get_queue(s->nic);

    if (!(s->compat_flags & E1000_FLAG_MIT)) {
        s->mac_reg[ITR] = s->mac_reg[RDTR] = s->mac_reg[RADV] =
            s->mac_reg[TADV] = 0;
        s->mit_irq_level = false;
    }
    s->mit_ide = 0;
    s->mit_timer_on = false;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in mac_reg[STATUS].
     * Alternatively, restart link negotiation if it was in progress. */
    nc->link_down = (s->mac_reg[STATUS] & E1000_STATUS_LU) == 0;

    if (have_autoneg(s) &&
        !(s->phy_reg[PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        nc->link_down = false;
        timer_mod(s->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }

    return 0;
}

static bool e1000_mit_state_needed(void *opaque)
{
    E1000State *s = opaque;

    return s->compat_flags & E1000_FLAG_MIT;
}

static const VMStateDescription vmstate_e1000_mit_state = {
    .name = "e1000/mit_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = e1000_mit_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mac_reg[RDTR], E1000State),
        VMSTATE_UINT32(mac_reg[RADV], E1000State),
        VMSTATE_UINT32(mac_reg[TADV], E1000State),
        VMSTATE_UINT32(mac_reg[ITR], E1000State),
        VMSTATE_BOOL(mit_irq_level, E1000State),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = e1000_pre_save,
    .post_load = e1000_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_e1000_mit_state,
        NULL
    }
};

/*
 * EEPROM contents documented in Tables 5-2 and 5-3, pp. 98-102.
 * Note: A valid DevId will be inserted during pci_e1000_init().
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, 0 /*DevId*/, 0x8086, 0 /*DevId*/, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

/* PCI interface */

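/*
 * Map the 128KB register block with MMIO write coalescing enabled
 * everywhere except around the registers in excluded_regs, whose writes
 * have side effects (interrupt cause updates, MDIC transactions, TCTL/TDT
 * transmit kicks) and must reach the device model immediately instead of
 * being batched.
 */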
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, OBJECT(d), &e1000_mmio_ops, d,
                          "e1000-mmio", PNPMMIO_SIZE);
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, OBJECT(d), &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}

static void
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = E1000(dev);

    timer_del(d->autoneg_timer);
    timer_free(d->autoneg_timer);
    timer_del(d->mit_timer);
    timer_free(d->mit_timer);
    qemu_del_nic(d->nic);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .receive_iov = e1000_receive_iov,
    .link_status_changed = e1000_set_link_status,
};

static void e1000_write_config(PCIDevice *pci_dev, uint32_t address,
                               uint32_t val, int len)
{
    E1000State *s = E1000(pci_dev);

    pci_default_write_config(pci_dev, address, val, len);

    if (range_covers_byte(address, len, PCI_COMMAND) &&
        (pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}


static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    E1000State *d = E1000(pci_dev);
    PCIDeviceClass *pdc = PCI_DEVICE_GET_CLASS(pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_dev->config_write = e1000_write_config;

    pci_conf = pci_dev->config;

    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    e1000_mmio_setup(d);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);

    pci_register_bar(pci_dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    d->eeprom_data[11] = d->eeprom_data[13] = pdc->device_id;
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          object_get_typename(OBJECT(d)), dev->id, d);

    qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr);

    d->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, e1000_autoneg_timer, d);
    d->mit_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000_mit_timer, d);
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = E1000(dev);
    e1000_reset(d);
}

static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_BIT("autonegotiation", E1000State,
                    compat_flags, E1000_FLAG_AUTONEG_BIT, true),
    DEFINE_PROP_BIT("mitigation", E1000State,
                    compat_flags, E1000_FLAG_MIT_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

typedef struct E1000Info {
    const char *name;
    uint16_t device_id;
    uint8_t revision;
    uint16_t phy_id2;
} E1000Info;

static void e1000_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    E1000BaseClass *e = E1000_DEVICE_CLASS(klass);
    const E1000Info *info = data;

    k->realize = pci_e1000_realize;
    k->exit = pci_e1000_uninit;
    k->romfile = "efi-e1000.rom";
    k->vendor_id = PCI_VENDOR_ID_INTEL;
    k->device_id = info->device_id;
    k->revision = info->revision;
    e->phy_id2 = info->phy_id2;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Intel Gigabit Ethernet";
    dc->reset = qdev_e1000_reset;
    dc->vmsd = &vmstate_e1000;
    dc->props = e1000_properties;
}

static void e1000_instance_init(Object *obj)
{
    E1000State *n = E1000(obj);
    device_add_bootindex_property(obj, &n->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static const TypeInfo e1000_base_info = {
    .name          = TYPE_E1000_BASE,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .instance_init = e1000_instance_init,
    .class_size    = sizeof(E1000BaseClass),
    .abstract      = true,
};

static const E1000Info e1000_devices[] = {
    {
        .name      = "e1000-82540em",
        .device_id = E1000_DEV_ID_82540EM,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
    {
        .name      = "e1000-82544gc",
        .device_id = E1000_DEV_ID_82544GC_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_82544x,
    },
    {
        .name      = "e1000-82545em",
        .device_id = E1000_DEV_ID_82545EM_COPPER,
        .revision  = 0x03,
        .phy_id2   = E1000_PHY_ID2_8254xx_DEFAULT,
    },
};

static const TypeInfo e1000_default_info = {
    .name   = "e1000",
    .parent = "e1000-82540em",
};

static void e1000_register_types(void)
{
    int i;

    type_register_static(&e1000_base_info);
    for (i = 0; i < ARRAY_SIZE(e1000_devices); i++) {
        const E1000Info *info = &e1000_devices[i];
        TypeInfo type_info = {};

        type_info.name = info->name;
        type_info.parent = TYPE_E1000_BASE;
        type_info.class_data = (void *)info;
        type_info.class_init = e1000_class_init;
        type_info.instance_init = e1000_instance_init;

        type_register(&type_info);
    }
    type_register_static(&e1000_default_info);
}

type_init(e1000_register_types)