2 * Core code for QEMU igb emulation
5 * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
7 * Copyright (c) 2020-2023 Red Hat, Inc.
8 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
9 * Developed by Daynix Computing LTD (http://www.daynix.com)
12 * Akihiko Odaki <akihiko.odaki@daynix.com>
13 * Gal Hammer <gal.hammer@sap.com>
14 * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
15 * Dmitry Fleytman <dmitry@daynix.com>
16 * Leonid Bloch <leonid@daynix.com>
17 * Yan Vugenfirer <yan@daynix.com>
19 * Based on work done by:
20 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
21 * Copyright (c) 2008 Qumranet
22 * Based on work done by:
23 * Copyright (c) 2007 Dan Aloni
24 * Copyright (c) 2004 Antony T Curtis
26 * This library is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU Lesser General Public
28 * License as published by the Free Software Foundation; either
29 * version 2.1 of the License, or (at your option) any later version.
31 * This library is distributed in the hope that it will be useful,
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
34 * Lesser General Public License for more details.
36 * You should have received a copy of the GNU Lesser General Public
37 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
40 #include "qemu/osdep.h"
44 #include "hw/net/mii.h"
45 #include "hw/pci/msi.h"
46 #include "hw/pci/msix.h"
47 #include "sysemu/runstate.h"
49 #include "net_tx_pkt.h"
50 #include "net_rx_pkt.h"
52 #include "igb_common.h"
53 #include "e1000x_common.h"
58 #define E1000E_MAX_TX_FRAGS (64)
60 union e1000_rx_desc_union
{
61 struct e1000_rx_desc legacy
;
62 union e1000_adv_rx_desc adv
;
65 typedef struct IGBTxPktVmdqCallbackContext
{
68 } IGBTxPktVmdqCallbackContext
;
71 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
72 bool has_vnet
, bool *external_tx
);
75 igb_set_interrupt_cause(IGBCore
*core
, uint32_t val
);
77 static void igb_update_interrupt_state(IGBCore
*core
);
78 static void igb_reset(IGBCore
*core
, bool sw
);
81 igb_raise_legacy_irq(IGBCore
*core
)
83 trace_e1000e_irq_legacy_notify(true);
84 e1000x_inc_reg_if_not_full(core
->mac
, IAC
);
85 pci_set_irq(core
->owner
, 1);
89 igb_lower_legacy_irq(IGBCore
*core
)
91 trace_e1000e_irq_legacy_notify(false);
92 pci_set_irq(core
->owner
, 0);
95 static void igb_msix_notify(IGBCore
*core
, unsigned int vector
)
97 PCIDevice
*dev
= core
->owner
;
100 vfn
= 8 - (vector
+ 2) / IGBVF_MSIX_VEC_NUM
;
101 if (vfn
< pcie_sriov_num_vfs(core
->owner
)) {
102 dev
= pcie_sriov_get_vf_at_index(core
->owner
, vfn
);
104 vector
= (vector
+ 2) % IGBVF_MSIX_VEC_NUM
;
105 } else if (vector
>= IGB_MSIX_VEC_NUM
) {
106 qemu_log_mask(LOG_GUEST_ERROR
,
107 "igb: Tried to use vector unavailable for PF");
111 msix_notify(dev
, vector
);
115 igb_intrmgr_rearm_timer(IGBIntrDelayTimer
*timer
)
117 int64_t delay_ns
= (int64_t) timer
->core
->mac
[timer
->delay_reg
] *
118 timer
->delay_resolution_ns
;
120 trace_e1000e_irq_rearm_timer(timer
->delay_reg
<< 2, delay_ns
);
122 timer_mod(timer
->timer
, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) + delay_ns
);
124 timer
->running
= true;
128 igb_intmgr_timer_resume(IGBIntrDelayTimer
*timer
)
130 if (timer
->running
) {
131 igb_intrmgr_rearm_timer(timer
);
136 igb_intmgr_timer_pause(IGBIntrDelayTimer
*timer
)
138 if (timer
->running
) {
139 timer_del(timer
->timer
);
144 igb_intrmgr_on_msix_throttling_timer(void *opaque
)
146 IGBIntrDelayTimer
*timer
= opaque
;
147 int idx
= timer
- &timer
->core
->eitr
[0];
149 timer
->running
= false;
151 trace_e1000e_irq_msix_notify_postponed_vec(idx
);
152 igb_msix_notify(timer
->core
, idx
);
156 igb_intrmgr_initialize_all_timers(IGBCore
*core
, bool create
)
160 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
161 core
->eitr
[i
].core
= core
;
162 core
->eitr
[i
].delay_reg
= EITR0
+ i
;
163 core
->eitr
[i
].delay_resolution_ns
= E1000_INTR_DELAY_NS_RES
;
170 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
171 core
->eitr
[i
].timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
172 igb_intrmgr_on_msix_throttling_timer
,
178 igb_intrmgr_resume(IGBCore
*core
)
182 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
183 igb_intmgr_timer_resume(&core
->eitr
[i
]);
188 igb_intrmgr_pause(IGBCore
*core
)
192 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
193 igb_intmgr_timer_pause(&core
->eitr
[i
]);
198 igb_intrmgr_reset(IGBCore
*core
)
202 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
203 if (core
->eitr
[i
].running
) {
204 timer_del(core
->eitr
[i
].timer
);
205 igb_intrmgr_on_msix_throttling_timer(&core
->eitr
[i
]);
211 igb_intrmgr_pci_unint(IGBCore
*core
)
215 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
216 timer_free(core
->eitr
[i
].timer
);
221 igb_intrmgr_pci_realize(IGBCore
*core
)
223 igb_intrmgr_initialize_all_timers(core
, true);
227 igb_rx_csum_enabled(IGBCore
*core
)
229 return (core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) ? false : true;
233 igb_rx_use_legacy_descriptor(IGBCore
*core
)
236 * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
243 igb_rss_enabled(IGBCore
*core
)
245 return (core
->mac
[MRQC
] & 3) == E1000_MRQC_ENABLE_RSS_MQ
&&
246 !igb_rx_csum_enabled(core
) &&
247 !igb_rx_use_legacy_descriptor(core
);
250 typedef struct E1000E_RSSInfo_st
{
258 igb_rss_get_hash_type(IGBCore
*core
, struct NetRxPkt
*pkt
)
261 EthL4HdrProto l4hdr_proto
;
263 assert(igb_rss_enabled(core
));
265 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
268 trace_e1000e_rx_rss_ip4(l4hdr_proto
, core
->mac
[MRQC
],
269 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
]),
270 E1000_MRQC_EN_IPV4(core
->mac
[MRQC
]));
272 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
273 E1000_MRQC_EN_TCPIPV4(core
->mac
[MRQC
])) {
274 return E1000_MRQ_RSS_TYPE_IPV4TCP
;
277 if (E1000_MRQC_EN_IPV4(core
->mac
[MRQC
])) {
278 return E1000_MRQ_RSS_TYPE_IPV4
;
281 eth_ip6_hdr_info
*ip6info
= net_rx_pkt_get_ip6_info(pkt
);
283 bool ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_EX_DIS
;
284 bool new_ex_dis
= core
->mac
[RFCTL
] & E1000_RFCTL_NEW_IPV6_EXT_DIS
;
287 * Following two traces must not be combined because resulting
288 * event will have 11 arguments totally and some trace backends
289 * (at least "ust") have limitation of maximum 10 arguments per
290 * event. Events with more arguments fail to compile for
291 * backends like these.
293 trace_e1000e_rx_rss_ip6_rfctl(core
->mac
[RFCTL
]);
294 trace_e1000e_rx_rss_ip6(ex_dis
, new_ex_dis
, l4hdr_proto
,
295 ip6info
->has_ext_hdrs
,
296 ip6info
->rss_ex_dst_valid
,
297 ip6info
->rss_ex_src_valid
,
299 E1000_MRQC_EN_TCPIPV6(core
->mac
[MRQC
]),
300 E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
]),
301 E1000_MRQC_EN_IPV6(core
->mac
[MRQC
]));
303 if ((!ex_dis
|| !ip6info
->has_ext_hdrs
) &&
304 (!new_ex_dis
|| !(ip6info
->rss_ex_dst_valid
||
305 ip6info
->rss_ex_src_valid
))) {
307 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&&
308 E1000_MRQC_EN_TCPIPV6(core
->mac
[MRQC
])) {
309 return E1000_MRQ_RSS_TYPE_IPV6TCP
;
312 if (E1000_MRQC_EN_IPV6EX(core
->mac
[MRQC
])) {
313 return E1000_MRQ_RSS_TYPE_IPV6EX
;
318 if (E1000_MRQC_EN_IPV6(core
->mac
[MRQC
])) {
319 return E1000_MRQ_RSS_TYPE_IPV6
;
324 return E1000_MRQ_RSS_TYPE_NONE
;
328 igb_rss_calc_hash(IGBCore
*core
, struct NetRxPkt
*pkt
, E1000E_RSSInfo
*info
)
330 NetRxPktRssType type
;
332 assert(igb_rss_enabled(core
));
334 switch (info
->type
) {
335 case E1000_MRQ_RSS_TYPE_IPV4
:
336 type
= NetPktRssIpV4
;
338 case E1000_MRQ_RSS_TYPE_IPV4TCP
:
339 type
= NetPktRssIpV4Tcp
;
341 case E1000_MRQ_RSS_TYPE_IPV6TCP
:
342 type
= NetPktRssIpV6TcpEx
;
344 case E1000_MRQ_RSS_TYPE_IPV6
:
345 type
= NetPktRssIpV6
;
347 case E1000_MRQ_RSS_TYPE_IPV6EX
:
348 type
= NetPktRssIpV6Ex
;
355 return net_rx_pkt_calc_rss_hash(pkt
, type
, (uint8_t *) &core
->mac
[RSSRK
]);
359 igb_rss_parse_packet(IGBCore
*core
, struct NetRxPkt
*pkt
, bool tx
,
360 E1000E_RSSInfo
*info
)
362 trace_e1000e_rx_rss_started();
364 if (tx
|| !igb_rss_enabled(core
)) {
365 info
->enabled
= false;
369 trace_e1000e_rx_rss_disabled();
373 info
->enabled
= true;
375 info
->type
= igb_rss_get_hash_type(core
, pkt
);
377 trace_e1000e_rx_rss_type(info
->type
);
379 if (info
->type
== E1000_MRQ_RSS_TYPE_NONE
) {
385 info
->hash
= igb_rss_calc_hash(core
, pkt
, info
);
386 info
->queue
= E1000_RSS_QUEUE(&core
->mac
[RETA
], info
->hash
);
390 igb_setup_tx_offloads(IGBCore
*core
, struct igb_tx
*tx
)
393 if (!net_tx_pkt_build_vheader(tx
->tx_pkt
, true, true, tx
->mss
)) {
397 net_tx_pkt_update_ip_checksums(tx
->tx_pkt
);
398 e1000x_inc_reg_if_not_full(core
->mac
, TSCTC
);
403 if (!net_tx_pkt_build_vheader(tx
->tx_pkt
, false, true, 0)) {
409 net_tx_pkt_update_ip_hdr_checksum(tx
->tx_pkt
);
415 static void igb_tx_pkt_mac_callback(void *core
,
416 const struct iovec
*iov
,
418 const struct iovec
*virt_iov
,
421 igb_receive_internal(core
, virt_iov
, virt_iovcnt
, true, NULL
);
424 static void igb_tx_pkt_vmdq_callback(void *opaque
,
425 const struct iovec
*iov
,
427 const struct iovec
*virt_iov
,
430 IGBTxPktVmdqCallbackContext
*context
= opaque
;
433 igb_receive_internal(context
->core
, virt_iov
, virt_iovcnt
, true,
437 if (context
->core
->has_vnet
) {
438 qemu_sendv_packet(context
->nc
, virt_iov
, virt_iovcnt
);
440 qemu_sendv_packet(context
->nc
, iov
, iovcnt
);
445 /* TX Packets Switching (7.10.3.6) */
446 static bool igb_tx_pkt_switch(IGBCore
*core
, struct igb_tx
*tx
,
449 IGBTxPktVmdqCallbackContext context
;
451 /* TX switching is only used to serve VM to VM traffic. */
452 if (!(core
->mac
[MRQC
] & 1)) {
456 /* TX switching requires DTXSWC.Loopback_en bit enabled. */
457 if (!(core
->mac
[DTXSWC
] & E1000_DTXSWC_VMDQ_LOOPBACK_EN
)) {
464 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
465 igb_tx_pkt_vmdq_callback
, &context
);
468 return net_tx_pkt_send(tx
->tx_pkt
, nc
);
472 igb_tx_pkt_send(IGBCore
*core
, struct igb_tx
*tx
, int queue_index
)
474 int target_queue
= MIN(core
->max_queue_num
, queue_index
);
475 NetClientState
*queue
= qemu_get_subqueue(core
->owner_nic
, target_queue
);
477 if (!igb_setup_tx_offloads(core
, tx
)) {
481 net_tx_pkt_dump(tx
->tx_pkt
);
483 if ((core
->phy
[MII_BMCR
] & MII_BMCR_LOOPBACK
) ||
484 ((core
->mac
[RCTL
] & E1000_RCTL_LBM_MAC
) == E1000_RCTL_LBM_MAC
)) {
485 return net_tx_pkt_send_custom(tx
->tx_pkt
, false,
486 igb_tx_pkt_mac_callback
, core
);
488 return igb_tx_pkt_switch(core
, tx
, queue
);
493 igb_on_tx_done_update_stats(IGBCore
*core
, struct NetTxPkt
*tx_pkt
)
495 static const int PTCregs
[6] = { PTC64
, PTC127
, PTC255
, PTC511
,
498 size_t tot_len
= net_tx_pkt_get_total_len(tx_pkt
) + 4;
500 e1000x_increase_size_stats(core
->mac
, PTCregs
, tot_len
);
501 e1000x_inc_reg_if_not_full(core
->mac
, TPT
);
502 e1000x_grow_8reg_if_not_full(core
->mac
, TOTL
, tot_len
);
504 switch (net_tx_pkt_get_packet_type(tx_pkt
)) {
506 e1000x_inc_reg_if_not_full(core
->mac
, BPTC
);
509 e1000x_inc_reg_if_not_full(core
->mac
, MPTC
);
514 g_assert_not_reached();
517 core
->mac
[GPTC
] = core
->mac
[TPT
];
518 core
->mac
[GOTCL
] = core
->mac
[TOTL
];
519 core
->mac
[GOTCH
] = core
->mac
[TOTH
];
523 igb_process_tx_desc(IGBCore
*core
,
525 union e1000_adv_tx_desc
*tx_desc
,
528 struct e1000_adv_tx_context_desc
*tx_ctx_desc
;
529 uint32_t cmd_type_len
;
530 uint32_t olinfo_status
;
531 uint64_t buffer_addr
;
534 cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
536 if (cmd_type_len
& E1000_ADVTXD_DCMD_DEXT
) {
537 if ((cmd_type_len
& E1000_ADVTXD_DTYP_DATA
) ==
538 E1000_ADVTXD_DTYP_DATA
) {
539 /* advanced transmit data descriptor */
541 olinfo_status
= le32_to_cpu(tx_desc
->read
.olinfo_status
);
543 tx
->tse
= !!(cmd_type_len
& E1000_ADVTXD_DCMD_TSE
);
544 tx
->ixsm
= !!(olinfo_status
& E1000_ADVTXD_POTS_IXSM
);
545 tx
->txsm
= !!(olinfo_status
& E1000_ADVTXD_POTS_TXSM
);
549 } else if ((cmd_type_len
& E1000_ADVTXD_DTYP_CTXT
) ==
550 E1000_ADVTXD_DTYP_CTXT
) {
551 /* advanced transmit context descriptor */
552 tx_ctx_desc
= (struct e1000_adv_tx_context_desc
*)tx_desc
;
553 tx
->vlan
= le32_to_cpu(tx_ctx_desc
->vlan_macip_lens
) >> 16;
554 tx
->mss
= le32_to_cpu(tx_ctx_desc
->mss_l4len_idx
) >> 16;
557 /* unknown descriptor type */
561 /* legacy descriptor */
563 /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
566 buffer_addr
= le64_to_cpu(tx_desc
->read
.buffer_addr
);
567 length
= cmd_type_len
& 0xFFFF;
570 if (!net_tx_pkt_add_raw_fragment(tx
->tx_pkt
, buffer_addr
, length
)) {
575 if (cmd_type_len
& E1000_TXD_CMD_EOP
) {
576 if (!tx
->skip_cp
&& net_tx_pkt_parse(tx
->tx_pkt
)) {
577 if (cmd_type_len
& E1000_TXD_CMD_VLE
) {
578 net_tx_pkt_setup_vlan_header_ex(tx
->tx_pkt
, tx
->vlan
,
579 core
->mac
[VET
] & 0xffff);
581 if (igb_tx_pkt_send(core
, tx
, queue_index
)) {
582 igb_on_tx_done_update_stats(core
, tx
->tx_pkt
);
588 net_tx_pkt_reset(tx
->tx_pkt
);
592 static uint32_t igb_tx_wb_eic(IGBCore
*core
, int queue_idx
)
596 n
= igb_ivar_entry_tx(queue_idx
);
597 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
599 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
602 static uint32_t igb_rx_wb_eic(IGBCore
*core
, int queue_idx
)
606 n
= igb_ivar_entry_rx(queue_idx
);
607 ent
= (core
->mac
[IVAR0
+ n
/ 4] >> (8 * (n
% 4))) & 0xff;
609 return (ent
& E1000_IVAR_VALID
) ? BIT(ent
& 0x1f) : 0;
612 typedef struct E1000E_RingInfo_st
{
622 igb_ring_empty(IGBCore
*core
, const E1000E_RingInfo
*r
)
624 return core
->mac
[r
->dh
] == core
->mac
[r
->dt
] ||
625 core
->mac
[r
->dt
] >= core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
;
628 static inline uint64_t
629 igb_ring_base(IGBCore
*core
, const E1000E_RingInfo
*r
)
631 uint64_t bah
= core
->mac
[r
->dbah
];
632 uint64_t bal
= core
->mac
[r
->dbal
];
634 return (bah
<< 32) + bal
;
637 static inline uint64_t
638 igb_ring_head_descr(IGBCore
*core
, const E1000E_RingInfo
*r
)
640 return igb_ring_base(core
, r
) + E1000_RING_DESC_LEN
* core
->mac
[r
->dh
];
644 igb_ring_advance(IGBCore
*core
, const E1000E_RingInfo
*r
, uint32_t count
)
646 core
->mac
[r
->dh
] += count
;
648 if (core
->mac
[r
->dh
] * E1000_RING_DESC_LEN
>= core
->mac
[r
->dlen
]) {
649 core
->mac
[r
->dh
] = 0;
653 static inline uint32_t
654 igb_ring_free_descr_num(IGBCore
*core
, const E1000E_RingInfo
*r
)
656 trace_e1000e_ring_free_space(r
->idx
, core
->mac
[r
->dlen
],
657 core
->mac
[r
->dh
], core
->mac
[r
->dt
]);
659 if (core
->mac
[r
->dh
] <= core
->mac
[r
->dt
]) {
660 return core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
663 if (core
->mac
[r
->dh
] > core
->mac
[r
->dt
]) {
664 return core
->mac
[r
->dlen
] / E1000_RING_DESC_LEN
+
665 core
->mac
[r
->dt
] - core
->mac
[r
->dh
];
668 g_assert_not_reached();
673 igb_ring_enabled(IGBCore
*core
, const E1000E_RingInfo
*r
)
675 return core
->mac
[r
->dlen
] > 0;
678 typedef struct IGB_TxRing_st
{
679 const E1000E_RingInfo
*i
;
/*
 * Queue index for a per-queue register: queue registers are laid out in
 * blocks of 16 32-bit words starting at @base_reg_idx.
 */
static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    int offset = reg_idx - base_reg_idx;

    return offset / 16;
}
690 igb_tx_ring_init(IGBCore
*core
, IGB_TxRing
*txr
, int idx
)
692 static const E1000E_RingInfo i
[IGB_NUM_QUEUES
] = {
693 { TDBAH0
, TDBAL0
, TDLEN0
, TDH0
, TDT0
, 0 },
694 { TDBAH1
, TDBAL1
, TDLEN1
, TDH1
, TDT1
, 1 },
695 { TDBAH2
, TDBAL2
, TDLEN2
, TDH2
, TDT2
, 2 },
696 { TDBAH3
, TDBAL3
, TDLEN3
, TDH3
, TDT3
, 3 },
697 { TDBAH4
, TDBAL4
, TDLEN4
, TDH4
, TDT4
, 4 },
698 { TDBAH5
, TDBAL5
, TDLEN5
, TDH5
, TDT5
, 5 },
699 { TDBAH6
, TDBAL6
, TDLEN6
, TDH6
, TDT6
, 6 },
700 { TDBAH7
, TDBAL7
, TDLEN7
, TDH7
, TDT7
, 7 },
701 { TDBAH8
, TDBAL8
, TDLEN8
, TDH8
, TDT8
, 8 },
702 { TDBAH9
, TDBAL9
, TDLEN9
, TDH9
, TDT9
, 9 },
703 { TDBAH10
, TDBAL10
, TDLEN10
, TDH10
, TDT10
, 10 },
704 { TDBAH11
, TDBAL11
, TDLEN11
, TDH11
, TDT11
, 11 },
705 { TDBAH12
, TDBAL12
, TDLEN12
, TDH12
, TDT12
, 12 },
706 { TDBAH13
, TDBAL13
, TDLEN13
, TDH13
, TDT13
, 13 },
707 { TDBAH14
, TDBAL14
, TDLEN14
, TDH14
, TDT14
, 14 },
708 { TDBAH15
, TDBAL15
, TDLEN15
, TDH15
, TDT15
, 15 }
711 assert(idx
< ARRAY_SIZE(i
));
714 txr
->tx
= &core
->tx
[idx
];
717 typedef struct E1000E_RxRing_st
{
718 const E1000E_RingInfo
*i
;
722 igb_rx_ring_init(IGBCore
*core
, E1000E_RxRing
*rxr
, int idx
)
724 static const E1000E_RingInfo i
[IGB_NUM_QUEUES
] = {
725 { RDBAH0
, RDBAL0
, RDLEN0
, RDH0
, RDT0
, 0 },
726 { RDBAH1
, RDBAL1
, RDLEN1
, RDH1
, RDT1
, 1 },
727 { RDBAH2
, RDBAL2
, RDLEN2
, RDH2
, RDT2
, 2 },
728 { RDBAH3
, RDBAL3
, RDLEN3
, RDH3
, RDT3
, 3 },
729 { RDBAH4
, RDBAL4
, RDLEN4
, RDH4
, RDT4
, 4 },
730 { RDBAH5
, RDBAL5
, RDLEN5
, RDH5
, RDT5
, 5 },
731 { RDBAH6
, RDBAL6
, RDLEN6
, RDH6
, RDT6
, 6 },
732 { RDBAH7
, RDBAL7
, RDLEN7
, RDH7
, RDT7
, 7 },
733 { RDBAH8
, RDBAL8
, RDLEN8
, RDH8
, RDT8
, 8 },
734 { RDBAH9
, RDBAL9
, RDLEN9
, RDH9
, RDT9
, 9 },
735 { RDBAH10
, RDBAL10
, RDLEN10
, RDH10
, RDT10
, 10 },
736 { RDBAH11
, RDBAL11
, RDLEN11
, RDH11
, RDT11
, 11 },
737 { RDBAH12
, RDBAL12
, RDLEN12
, RDH12
, RDT12
, 12 },
738 { RDBAH13
, RDBAL13
, RDLEN13
, RDH13
, RDT13
, 13 },
739 { RDBAH14
, RDBAL14
, RDLEN14
, RDH14
, RDT14
, 14 },
740 { RDBAH15
, RDBAL15
, RDLEN15
, RDH15
, RDT15
, 15 }
743 assert(idx
< ARRAY_SIZE(i
));
749 igb_txdesc_writeback(IGBCore
*core
, dma_addr_t base
,
750 union e1000_adv_tx_desc
*tx_desc
,
751 const E1000E_RingInfo
*txi
)
754 uint32_t cmd_type_len
= le32_to_cpu(tx_desc
->read
.cmd_type_len
);
757 tdwba
= core
->mac
[E1000_TDWBAL(txi
->idx
) >> 2];
758 tdwba
|= (uint64_t)core
->mac
[E1000_TDWBAH(txi
->idx
) >> 2] << 32;
760 if (!(cmd_type_len
& E1000_TXD_CMD_RS
)) {
764 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
770 uint32_t buffer
= cpu_to_le32(core
->mac
[txi
->dh
]);
771 pci_dma_write(d
, tdwba
& ~3, &buffer
, sizeof(buffer
));
773 uint32_t status
= le32_to_cpu(tx_desc
->wb
.status
) | E1000_TXD_STAT_DD
;
775 tx_desc
->wb
.status
= cpu_to_le32(status
);
776 pci_dma_write(d
, base
+ offsetof(union e1000_adv_tx_desc
, wb
),
777 &tx_desc
->wb
, sizeof(tx_desc
->wb
));
780 return igb_tx_wb_eic(core
, txi
->idx
);
784 igb_start_xmit(IGBCore
*core
, const IGB_TxRing
*txr
)
788 union e1000_adv_tx_desc desc
;
789 const E1000E_RingInfo
*txi
= txr
->i
;
792 /* TODO: check if the queue itself is enabled too. */
793 if (!(core
->mac
[TCTL
] & E1000_TCTL_EN
)) {
794 trace_e1000e_tx_disabled();
798 d
= pcie_sriov_get_vf_at_index(core
->owner
, txi
->idx
% 8);
803 while (!igb_ring_empty(core
, txi
)) {
804 base
= igb_ring_head_descr(core
, txi
);
806 pci_dma_read(d
, base
, &desc
, sizeof(desc
));
808 trace_e1000e_tx_descr((void *)(intptr_t)desc
.read
.buffer_addr
,
809 desc
.read
.cmd_type_len
, desc
.wb
.status
);
811 igb_process_tx_desc(core
, txr
->tx
, &desc
, txi
->idx
);
812 igb_ring_advance(core
, txi
, 1);
813 eic
|= igb_txdesc_writeback(core
, base
, &desc
, txi
);
817 core
->mac
[EICR
] |= eic
;
818 igb_set_interrupt_cause(core
, E1000_ICR_TXDW
);
823 igb_rxbufsize(IGBCore
*core
, const E1000E_RingInfo
*r
)
825 uint32_t srrctl
= core
->mac
[E1000_SRRCTL(r
->idx
) >> 2];
826 uint32_t bsizepkt
= srrctl
& E1000_SRRCTL_BSIZEPKT_MASK
;
828 return bsizepkt
<< E1000_SRRCTL_BSIZEPKT_SHIFT
;
831 return e1000x_rxbufsize(core
->mac
[RCTL
]);
835 igb_has_rxbufs(IGBCore
*core
, const E1000E_RingInfo
*r
, size_t total_size
)
837 uint32_t bufs
= igb_ring_free_descr_num(core
, r
);
838 uint32_t bufsize
= igb_rxbufsize(core
, r
);
840 trace_e1000e_rx_has_buffers(r
->idx
, bufs
, total_size
, bufsize
);
842 return total_size
<= bufs
/ (core
->rx_desc_len
/ E1000_MIN_RX_DESC_LEN
) *
847 igb_start_recv(IGBCore
*core
)
851 trace_e1000e_rx_start_recv();
853 for (i
= 0; i
<= core
->max_queue_num
; i
++) {
854 qemu_flush_queued_packets(qemu_get_subqueue(core
->owner_nic
, i
));
859 igb_can_receive(IGBCore
*core
)
863 if (!e1000x_rx_ready(core
->owner
, core
->mac
)) {
867 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
870 igb_rx_ring_init(core
, &rxr
, i
);
871 if (igb_ring_enabled(core
, rxr
.i
) && igb_has_rxbufs(core
, rxr
.i
, 1)) {
872 trace_e1000e_rx_can_recv();
877 trace_e1000e_rx_can_recv_rings_full();
882 igb_receive(IGBCore
*core
, const uint8_t *buf
, size_t size
)
884 const struct iovec iov
= {
885 .iov_base
= (uint8_t *)buf
,
889 return igb_receive_iov(core
, &iov
, 1);
893 igb_rx_l3_cso_enabled(IGBCore
*core
)
895 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_IPOFLD
);
899 igb_rx_l4_cso_enabled(IGBCore
*core
)
901 return !!(core
->mac
[RXCSUM
] & E1000_RXCSUM_TUOFLD
);
904 static uint16_t igb_receive_assign(IGBCore
*core
, const struct eth_header
*ehdr
,
905 E1000E_RSSInfo
*rss_info
, bool *external_tx
)
907 static const int ta_shift
[] = { 4, 3, 2, 0 };
908 uint32_t f
, ra
[2], *macp
, rctl
= core
->mac
[RCTL
];
910 uint16_t vid
= lduw_be_p(&PKT_GET_VLAN_HDR(ehdr
)->h_tci
) & VLAN_VID_MASK
;
911 bool accepted
= false;
914 memset(rss_info
, 0, sizeof(E1000E_RSSInfo
));
920 if (e1000x_is_vlan_packet(ehdr
, core
->mac
[VET
] & 0xffff) &&
921 e1000x_vlan_rx_filter_enabled(core
->mac
)) {
923 ldl_le_p((uint32_t *)(core
->mac
+ VFTA
) +
924 ((vid
>> E1000_VFTA_ENTRY_SHIFT
) & E1000_VFTA_ENTRY_MASK
));
925 if ((vfta
& (1 << (vid
& E1000_VFTA_ENTRY_BIT_SHIFT_MASK
))) == 0) {
926 trace_e1000e_rx_flt_vlan_mismatch(vid
);
929 trace_e1000e_rx_flt_vlan_match(vid
);
933 if (core
->mac
[MRQC
] & 1) {
934 if (is_broadcast_ether_addr(ehdr
->h_dest
)) {
935 for (i
= 0; i
< 8; i
++) {
936 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_BAM
) {
941 for (macp
= core
->mac
+ RA
; macp
< core
->mac
+ RA
+ 32; macp
+= 2) {
942 if (!(macp
[1] & E1000_RAH_AV
)) {
945 ra
[0] = cpu_to_le32(macp
[0]);
946 ra
[1] = cpu_to_le32(macp
[1]);
947 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
948 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
952 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
953 if (!(macp
[1] & E1000_RAH_AV
)) {
956 ra
[0] = cpu_to_le32(macp
[0]);
957 ra
[1] = cpu_to_le32(macp
[1]);
958 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
959 queues
|= (macp
[1] & E1000_RAH_POOL_MASK
) / E1000_RAH_POOL_1
;
964 macp
= core
->mac
+ (is_multicast_ether_addr(ehdr
->h_dest
) ? MTA
: UTA
);
966 f
= ta_shift
[(rctl
>> E1000_RCTL_MO_SHIFT
) & 3];
967 f
= (((ehdr
->h_dest
[5] << 8) | ehdr
->h_dest
[4]) >> f
) & 0xfff;
968 if (macp
[f
>> 5] & (1 << (f
& 0x1f))) {
969 for (i
= 0; i
< 8; i
++) {
970 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_ROMPE
) {
975 } else if (is_unicast_ether_addr(ehdr
->h_dest
) && external_tx
) {
976 *external_tx
= false;
980 if (e1000x_vlan_rx_filter_enabled(core
->mac
)) {
983 if (e1000x_is_vlan_packet(ehdr
, core
->mac
[VET
] & 0xffff)) {
984 for (i
= 0; i
< E1000_VLVF_ARRAY_SIZE
; i
++) {
985 if ((core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_MASK
) == vid
&&
986 (core
->mac
[VLVF0
+ i
] & E1000_VLVF_VLANID_ENABLE
)) {
987 uint32_t poolsel
= core
->mac
[VLVF0
+ i
] & E1000_VLVF_POOLSEL_MASK
;
988 mask
|= poolsel
>> E1000_VLVF_POOLSEL_SHIFT
;
992 for (i
= 0; i
< 8; i
++) {
993 if (core
->mac
[VMOLR0
+ i
] & E1000_VMOLR_AUPE
) {
1002 if (is_unicast_ether_addr(ehdr
->h_dest
) && !queues
&& !external_tx
&&
1003 !(core
->mac
[VT_CTL
] & E1000_VT_CTL_DISABLE_DEF_POOL
)) {
1004 uint32_t def_pl
= core
->mac
[VT_CTL
] & E1000_VT_CTL_DEFAULT_POOL_MASK
;
1005 queues
= BIT(def_pl
>> E1000_VT_CTL_DEFAULT_POOL_SHIFT
);
1008 igb_rss_parse_packet(core
, core
->rx_pkt
, external_tx
!= NULL
, rss_info
);
1009 if (rss_info
->queue
& 1) {
1013 switch (net_rx_pkt_get_packet_type(core
->rx_pkt
)) {
1015 if (rctl
& E1000_RCTL_UPE
) {
1016 accepted
= true; /* promiscuous ucast */
1021 if (rctl
& E1000_RCTL_BAM
) {
1022 accepted
= true; /* broadcast enabled */
1027 if (rctl
& E1000_RCTL_MPE
) {
1028 accepted
= true; /* promiscuous mcast */
1033 g_assert_not_reached();
1037 accepted
= e1000x_rx_group_filter(core
->mac
, ehdr
->h_dest
);
1041 for (macp
= core
->mac
+ RA2
; macp
< core
->mac
+ RA2
+ 16; macp
+= 2) {
1042 if (!(macp
[1] & E1000_RAH_AV
)) {
1045 ra
[0] = cpu_to_le32(macp
[0]);
1046 ra
[1] = cpu_to_le32(macp
[1]);
1047 if (!memcmp(ehdr
->h_dest
, (uint8_t *)ra
, ETH_ALEN
)) {
1048 trace_e1000x_rx_flt_ucast_match((int)(macp
- core
->mac
- RA2
) / 2,
1049 MAC_ARG(ehdr
->h_dest
));
1058 igb_rss_parse_packet(core
, core
->rx_pkt
, false, rss_info
);
1059 queues
= BIT(rss_info
->queue
);
1067 igb_read_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1070 *buff_addr
= le64_to_cpu(desc
->buffer_addr
);
1074 igb_read_adv_rx_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1077 *buff_addr
= le64_to_cpu(desc
->read
.pkt_addr
);
1081 igb_read_rx_descr(IGBCore
*core
, union e1000_rx_desc_union
*desc
,
1084 if (igb_rx_use_legacy_descriptor(core
)) {
1085 igb_read_lgcy_rx_descr(core
, &desc
->legacy
, buff_addr
);
1087 igb_read_adv_rx_descr(core
, &desc
->adv
, buff_addr
);
1092 igb_verify_csum_in_sw(IGBCore
*core
,
1093 struct NetRxPkt
*pkt
,
1094 uint32_t *status_flags
,
1095 EthL4HdrProto l4hdr_proto
)
1098 uint32_t csum_error
;
1100 if (igb_rx_l3_cso_enabled(core
)) {
1101 if (!net_rx_pkt_validate_l3_csum(pkt
, &csum_valid
)) {
1102 trace_e1000e_rx_metadata_l3_csum_validation_failed();
1104 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_IPE
;
1105 *status_flags
|= E1000_RXD_STAT_IPCS
| csum_error
;
1108 trace_e1000e_rx_metadata_l3_cso_disabled();
1111 if (!igb_rx_l4_cso_enabled(core
)) {
1112 trace_e1000e_rx_metadata_l4_cso_disabled();
1116 if (!net_rx_pkt_validate_l4_csum(pkt
, &csum_valid
)) {
1117 trace_e1000e_rx_metadata_l4_csum_validation_failed();
1121 csum_error
= csum_valid
? 0 : E1000_RXDEXT_STATERR_TCPE
;
1122 *status_flags
|= E1000_RXD_STAT_TCPCS
| csum_error
;
1124 if (l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
) {
1125 *status_flags
|= E1000_RXD_STAT_UDPCS
;
1130 igb_build_rx_metadata(IGBCore
*core
,
1131 struct NetRxPkt
*pkt
,
1133 const E1000E_RSSInfo
*rss_info
,
1134 uint16_t *pkt_info
, uint16_t *hdr_info
,
1136 uint32_t *status_flags
,
1140 struct virtio_net_hdr
*vhdr
;
1141 bool hasip4
, hasip6
;
1142 EthL4HdrProto l4hdr_proto
;
1145 *status_flags
= E1000_RXD_STAT_DD
;
1147 /* No additional metadata needed for non-EOP descriptors */
1148 /* TODO: EOP apply only to status so don't skip whole function. */
1153 *status_flags
|= E1000_RXD_STAT_EOP
;
1155 net_rx_pkt_get_protocols(pkt
, &hasip4
, &hasip6
, &l4hdr_proto
);
1156 trace_e1000e_rx_metadata_protocols(hasip4
, hasip6
, l4hdr_proto
);
1159 if (net_rx_pkt_is_vlan_stripped(pkt
)) {
1160 *status_flags
|= E1000_RXD_STAT_VP
;
1161 *vlan_tag
= cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt
));
1162 trace_e1000e_rx_metadata_vlan(*vlan_tag
);
1165 /* Packet parsing results */
1166 if ((core
->mac
[RXCSUM
] & E1000_RXCSUM_PCSD
) != 0) {
1167 if (rss_info
->enabled
) {
1168 *rss
= cpu_to_le32(rss_info
->hash
);
1169 trace_igb_rx_metadata_rss(*rss
);
1171 } else if (hasip4
) {
1172 *status_flags
|= E1000_RXD_STAT_IPIDV
;
1173 *ip_id
= cpu_to_le16(net_rx_pkt_get_ip_id(pkt
));
1174 trace_e1000e_rx_metadata_ip_id(*ip_id
);
1177 if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
&& net_rx_pkt_is_tcp_ack(pkt
)) {
1178 *status_flags
|= E1000_RXD_STAT_ACK
;
1179 trace_e1000e_rx_metadata_ack();
1182 if (hasip6
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_DIS
)) {
1183 trace_e1000e_rx_metadata_ipv6_filtering_disabled();
1184 pkt_type
= E1000_RXD_PKT_MAC
;
1185 } else if (l4hdr_proto
== ETH_L4_HDR_PROTO_TCP
||
1186 l4hdr_proto
== ETH_L4_HDR_PROTO_UDP
) {
1187 pkt_type
= hasip4
? E1000_RXD_PKT_IP4_XDP
: E1000_RXD_PKT_IP6_XDP
;
1188 } else if (hasip4
|| hasip6
) {
1189 pkt_type
= hasip4
? E1000_RXD_PKT_IP4
: E1000_RXD_PKT_IP6
;
1191 pkt_type
= E1000_RXD_PKT_MAC
;
1194 trace_e1000e_rx_metadata_pkt_type(pkt_type
);
1197 if (rss_info
->enabled
) {
1198 *pkt_info
= rss_info
->type
;
1201 *pkt_info
|= (pkt_type
<< 4);
1203 *status_flags
|= E1000_RXD_PKT_TYPE(pkt_type
);
1210 /* RX CSO information */
1211 if (hasip6
&& (core
->mac
[RFCTL
] & E1000_RFCTL_IPV6_XSUM_DIS
)) {
1212 trace_e1000e_rx_metadata_ipv6_sum_disabled();
1216 vhdr
= net_rx_pkt_get_vhdr(pkt
);
1218 if (!(vhdr
->flags
& VIRTIO_NET_HDR_F_DATA_VALID
) &&
1219 !(vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
)) {
1220 trace_e1000e_rx_metadata_virthdr_no_csum_info();
1221 igb_verify_csum_in_sw(core
, pkt
, status_flags
, l4hdr_proto
);
1225 if (igb_rx_l3_cso_enabled(core
)) {
1226 *status_flags
|= hasip4
? E1000_RXD_STAT_IPCS
: 0;
1228 trace_e1000e_rx_metadata_l3_cso_disabled();
1231 if (igb_rx_l4_cso_enabled(core
)) {
1232 switch (l4hdr_proto
) {
1233 case ETH_L4_HDR_PROTO_TCP
:
1234 *status_flags
|= E1000_RXD_STAT_TCPCS
;
1237 case ETH_L4_HDR_PROTO_UDP
:
1238 *status_flags
|= E1000_RXD_STAT_TCPCS
| E1000_RXD_STAT_UDPCS
;
1245 trace_e1000e_rx_metadata_l4_cso_disabled();
1248 trace_e1000e_rx_metadata_status_flags(*status_flags
);
1251 *status_flags
= cpu_to_le32(*status_flags
);
1255 igb_write_lgcy_rx_descr(IGBCore
*core
, struct e1000_rx_desc
*desc
,
1256 struct NetRxPkt
*pkt
,
1257 const E1000E_RSSInfo
*rss_info
,
1260 uint32_t status_flags
, rss
;
1263 assert(!rss_info
->enabled
);
1264 desc
->length
= cpu_to_le16(length
);
1267 igb_build_rx_metadata(core
, pkt
, pkt
!= NULL
,
1270 &status_flags
, &ip_id
,
1272 desc
->errors
= (uint8_t) (le32_to_cpu(status_flags
) >> 24);
1273 desc
->status
= (uint8_t) le32_to_cpu(status_flags
);
1277 igb_write_adv_rx_descr(IGBCore
*core
, union e1000_adv_rx_desc
*desc
,
1278 struct NetRxPkt
*pkt
,
1279 const E1000E_RSSInfo
*rss_info
,
1282 memset(&desc
->wb
, 0, sizeof(desc
->wb
));
1284 desc
->wb
.upper
.length
= cpu_to_le16(length
);
1286 igb_build_rx_metadata(core
, pkt
, pkt
!= NULL
,
1288 &desc
->wb
.lower
.lo_dword
.pkt_info
,
1289 &desc
->wb
.lower
.lo_dword
.hdr_info
,
1290 &desc
->wb
.lower
.hi_dword
.rss
,
1291 &desc
->wb
.upper
.status_error
,
1292 &desc
->wb
.lower
.hi_dword
.csum_ip
.ip_id
,
1293 &desc
->wb
.upper
.vlan
);
1297 igb_write_rx_descr(IGBCore
*core
, union e1000_rx_desc_union
*desc
,
1298 struct NetRxPkt
*pkt
, const E1000E_RSSInfo
*rss_info
, uint16_t length
)
1300 if (igb_rx_use_legacy_descriptor(core
)) {
1301 igb_write_lgcy_rx_descr(core
, &desc
->legacy
, pkt
, rss_info
, length
);
1303 igb_write_adv_rx_descr(core
, &desc
->adv
, pkt
, rss_info
, length
);
1308 igb_pci_dma_write_rx_desc(IGBCore
*core
, PCIDevice
*dev
, dma_addr_t addr
,
1309 union e1000_rx_desc_union
*desc
, dma_addr_t len
)
1311 if (igb_rx_use_legacy_descriptor(core
)) {
1312 struct e1000_rx_desc
*d
= &desc
->legacy
;
1313 size_t offset
= offsetof(struct e1000_rx_desc
, status
);
1314 uint8_t status
= d
->status
;
1316 d
->status
&= ~E1000_RXD_STAT_DD
;
1317 pci_dma_write(dev
, addr
, desc
, len
);
1319 if (status
& E1000_RXD_STAT_DD
) {
1321 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1324 union e1000_adv_rx_desc
*d
= &desc
->adv
;
1326 offsetof(union e1000_adv_rx_desc
, wb
.upper
.status_error
);
1327 uint32_t status
= d
->wb
.upper
.status_error
;
1329 d
->wb
.upper
.status_error
&= ~E1000_RXD_STAT_DD
;
1330 pci_dma_write(dev
, addr
, desc
, len
);
1332 if (status
& E1000_RXD_STAT_DD
) {
1333 d
->wb
.upper
.status_error
= status
;
1334 pci_dma_write(dev
, addr
+ offset
, &status
, sizeof(status
));
1340 igb_write_to_rx_buffers(IGBCore
*core
,
1345 dma_addr_t data_len
)
1347 trace_igb_rx_desc_buff_write(ba
, *written
, data
, data_len
);
1348 pci_dma_write(d
, ba
+ *written
, data
, data_len
);
1349 *written
+= data_len
;
1353 igb_update_rx_stats(IGBCore
*core
, size_t data_size
, size_t data_fcs_size
)
1355 e1000x_update_rx_total_stats(core
->mac
, data_size
, data_fcs_size
);
1357 switch (net_rx_pkt_get_packet_type(core
->rx_pkt
)) {
1359 e1000x_inc_reg_if_not_full(core
->mac
, BPRC
);
1363 e1000x_inc_reg_if_not_full(core
->mac
, MPRC
);
1372 igb_rx_descr_threshold_hit(IGBCore
*core
, const E1000E_RingInfo
*rxi
)
1374 return igb_ring_free_descr_num(core
, rxi
) ==
1375 ((core
->mac
[E1000_SRRCTL(rxi
->idx
) >> 2] >> 20) & 31) * 16;
1379 igb_write_packet_to_guest(IGBCore
*core
, struct NetRxPkt
*pkt
,
1380 const E1000E_RxRing
*rxr
,
1381 const E1000E_RSSInfo
*rss_info
)
1385 union e1000_rx_desc_union desc
;
1387 size_t desc_offset
= 0;
1390 struct iovec
*iov
= net_rx_pkt_get_iovec(pkt
);
1391 size_t size
= net_rx_pkt_get_total_len(pkt
);
1392 size_t total_size
= size
+ e1000x_fcs_len(core
->mac
);
1393 const E1000E_RingInfo
*rxi
= rxr
->i
;
1394 size_t bufsize
= igb_rxbufsize(core
, rxi
);
1396 d
= pcie_sriov_get_vf_at_index(core
->owner
, rxi
->idx
% 8);
1403 uint16_t written
= 0;
1404 bool is_last
= false;
1406 desc_size
= total_size
- desc_offset
;
1408 if (desc_size
> bufsize
) {
1409 desc_size
= bufsize
;
1412 if (igb_ring_empty(core
, rxi
)) {
1416 base
= igb_ring_head_descr(core
, rxi
);
1418 pci_dma_read(d
, base
, &desc
, core
->rx_desc_len
);
1420 trace_e1000e_rx_descr(rxi
->idx
, base
, core
->rx_desc_len
);
1422 igb_read_rx_descr(core
, &desc
, &ba
);
1425 if (desc_offset
< size
) {
1426 static const uint32_t fcs_pad
;
1428 size_t copy_size
= size
- desc_offset
;
1429 if (copy_size
> bufsize
) {
1430 copy_size
= bufsize
;
1433 /* Copy packet payload */
1435 iov_copy
= MIN(copy_size
, iov
->iov_len
- iov_ofs
);
1437 igb_write_to_rx_buffers(core
, d
, ba
, &written
,
1438 iov
->iov_base
+ iov_ofs
, iov_copy
);
1440 copy_size
-= iov_copy
;
1441 iov_ofs
+= iov_copy
;
1442 if (iov_ofs
== iov
->iov_len
) {
1448 if (desc_offset
+ desc_size
>= total_size
) {
1449 /* Simulate FCS checksum presence in the last descriptor */
1450 igb_write_to_rx_buffers(core
, d
, ba
, &written
,
1451 (const char *) &fcs_pad
, e1000x_fcs_len(core
->mac
));
1454 } else { /* as per intel docs; skip descriptors with null buf addr */
1455 trace_e1000e_rx_null_descriptor();
1457 desc_offset
+= desc_size
;
1458 if (desc_offset
>= total_size
) {
1462 igb_write_rx_descr(core
, &desc
, is_last
? core
->rx_pkt
: NULL
,
1464 igb_pci_dma_write_rx_desc(core
, d
, base
, &desc
, core
->rx_desc_len
);
1466 igb_ring_advance(core
, rxi
, core
->rx_desc_len
/ E1000_MIN_RX_DESC_LEN
);
1468 } while (desc_offset
< total_size
);
1470 igb_update_rx_stats(core
, size
, total_size
);
1474 igb_rx_fix_l4_csum(IGBCore
*core
, struct NetRxPkt
*pkt
)
1476 struct virtio_net_hdr
*vhdr
= net_rx_pkt_get_vhdr(pkt
);
1478 if (vhdr
->flags
& VIRTIO_NET_HDR_F_NEEDS_CSUM
) {
1479 net_rx_pkt_fix_l4_csum(pkt
);
1484 igb_receive_iov(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
)
1486 return igb_receive_internal(core
, iov
, iovcnt
, core
->has_vnet
, NULL
);
1490 igb_receive_internal(IGBCore
*core
, const struct iovec
*iov
, int iovcnt
,
1491 bool has_vnet
, bool *external_tx
)
1493 static const int maximum_ethernet_hdr_len
= (ETH_HLEN
+ 4);
1495 uint16_t queues
= 0;
1497 uint8_t min_buf
[ETH_ZLEN
];
1498 struct iovec min_iov
;
1499 struct eth_header
*ehdr
;
1500 uint8_t *filter_buf
;
1501 size_t size
, orig_size
;
1504 E1000E_RSSInfo rss_info
;
1508 trace_e1000e_rx_receive_iov(iovcnt
);
1511 *external_tx
= true;
1514 if (!e1000x_hw_rx_enabled(core
->mac
)) {
1518 /* Pull virtio header in */
1520 net_rx_pkt_set_vhdr_iovec(core
->rx_pkt
, iov
, iovcnt
);
1521 iov_ofs
= sizeof(struct virtio_net_hdr
);
1523 net_rx_pkt_unset_vhdr(core
->rx_pkt
);
1526 filter_buf
= iov
->iov_base
+ iov_ofs
;
1527 orig_size
= iov_size(iov
, iovcnt
);
1528 size
= orig_size
- iov_ofs
;
1530 /* Pad to minimum Ethernet frame length */
1531 if (size
< sizeof(min_buf
)) {
1532 iov_to_buf(iov
, iovcnt
, iov_ofs
, min_buf
, size
);
1533 memset(&min_buf
[size
], 0, sizeof(min_buf
) - size
);
1534 e1000x_inc_reg_if_not_full(core
->mac
, RUC
);
1535 min_iov
.iov_base
= filter_buf
= min_buf
;
1536 min_iov
.iov_len
= size
= sizeof(min_buf
);
1540 } else if (iov
->iov_len
< maximum_ethernet_hdr_len
) {
1541 /* This is very unlikely, but may happen. */
1542 iov_to_buf(iov
, iovcnt
, iov_ofs
, min_buf
, maximum_ethernet_hdr_len
);
1543 filter_buf
= min_buf
;
1546 /* Discard oversized packets if !LPE and !SBP. */
1547 if (e1000x_is_oversized(core
->mac
, size
)) {
1551 ehdr
= PKT_GET_ETH_HDR(filter_buf
);
1552 net_rx_pkt_set_packet_type(core
->rx_pkt
, get_eth_packet_type(ehdr
));
1554 net_rx_pkt_attach_iovec_ex(core
->rx_pkt
, iov
, iovcnt
, iov_ofs
,
1555 e1000x_vlan_enabled(core
->mac
),
1556 core
->mac
[VET
] & 0xffff);
1558 queues
= igb_receive_assign(core
, ehdr
, &rss_info
, external_tx
);
1560 trace_e1000e_rx_flt_dropped();
1564 total_size
= net_rx_pkt_get_total_len(core
->rx_pkt
) +
1565 e1000x_fcs_len(core
->mac
);
1567 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
1568 if (!(queues
& BIT(i
))) {
1572 igb_rx_ring_init(core
, &rxr
, i
);
1574 if (!igb_has_rxbufs(core
, rxr
.i
, total_size
)) {
1576 trace_e1000e_rx_not_written_to_guest(rxr
.i
->idx
);
1580 n
|= E1000_ICR_RXT0
;
1582 igb_rx_fix_l4_csum(core
, core
->rx_pkt
);
1583 igb_write_packet_to_guest(core
, core
->rx_pkt
, &rxr
, &rss_info
);
1585 /* Check if receive descriptor minimum threshold hit */
1586 if (igb_rx_descr_threshold_hit(core
, rxr
.i
)) {
1587 n
|= E1000_ICS_RXDMT0
;
1590 core
->mac
[EICR
] |= igb_rx_wb_eic(core
, rxr
.i
->idx
);
1592 trace_e1000e_rx_written_to_guest(rxr
.i
->idx
);
1595 trace_e1000e_rx_interrupt_set(n
);
1596 igb_set_interrupt_cause(core
, n
);
1602 igb_have_autoneg(IGBCore
*core
)
1604 return core
->phy
[MII_BMCR
] & MII_BMCR_AUTOEN
;
1607 static void igb_update_flowctl_status(IGBCore
*core
)
1609 if (igb_have_autoneg(core
) && core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
) {
1610 trace_e1000e_link_autoneg_flowctl(true);
1611 core
->mac
[CTRL
] |= E1000_CTRL_TFCE
| E1000_CTRL_RFCE
;
1613 trace_e1000e_link_autoneg_flowctl(false);
1618 igb_link_down(IGBCore
*core
)
1620 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
1621 igb_update_flowctl_status(core
);
1625 igb_set_phy_ctrl(IGBCore
*core
, uint16_t val
)
1627 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
1628 core
->phy
[MII_BMCR
] = val
& ~(0x3f | MII_BMCR_RESET
| MII_BMCR_ANRESTART
);
1630 if ((val
& MII_BMCR_ANRESTART
) && igb_have_autoneg(core
)) {
1631 e1000x_restart_autoneg(core
->mac
, core
->phy
, core
->autoneg_timer
);
1635 void igb_core_set_link_status(IGBCore
*core
)
1637 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
1638 uint32_t old_status
= core
->mac
[STATUS
];
1640 trace_e1000e_link_status_changed(nc
->link_down
? false : true);
1642 if (nc
->link_down
) {
1643 e1000x_update_regs_on_link_down(core
->mac
, core
->phy
);
1645 if (igb_have_autoneg(core
) &&
1646 !(core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
)) {
1647 e1000x_restart_autoneg(core
->mac
, core
->phy
,
1648 core
->autoneg_timer
);
1650 e1000x_update_regs_on_link_up(core
->mac
, core
->phy
);
1651 igb_start_recv(core
);
1655 if (core
->mac
[STATUS
] != old_status
) {
1656 igb_set_interrupt_cause(core
, E1000_ICR_LSC
);
1661 igb_set_ctrl(IGBCore
*core
, int index
, uint32_t val
)
1663 trace_e1000e_core_ctrl_write(index
, val
);
1665 /* RST is self clearing */
1666 core
->mac
[CTRL
] = val
& ~E1000_CTRL_RST
;
1667 core
->mac
[CTRL_DUP
] = core
->mac
[CTRL
];
1669 trace_e1000e_link_set_params(
1670 !!(val
& E1000_CTRL_ASDE
),
1671 (val
& E1000_CTRL_SPD_SEL
) >> E1000_CTRL_SPD_SHIFT
,
1672 !!(val
& E1000_CTRL_FRCSPD
),
1673 !!(val
& E1000_CTRL_FRCDPX
),
1674 !!(val
& E1000_CTRL_RFCE
),
1675 !!(val
& E1000_CTRL_TFCE
));
1677 if (val
& E1000_CTRL_RST
) {
1678 trace_e1000e_core_ctrl_sw_reset();
1679 igb_reset(core
, true);
1682 if (val
& E1000_CTRL_PHY_RST
) {
1683 trace_e1000e_core_ctrl_phy_reset();
1684 core
->mac
[STATUS
] |= E1000_STATUS_PHYRA
;
1689 igb_set_rfctl(IGBCore
*core
, int index
, uint32_t val
)
1691 trace_e1000e_rx_set_rfctl(val
);
1693 if (!(val
& E1000_RFCTL_ISCSI_DIS
)) {
1694 trace_e1000e_wrn_iscsi_filtering_not_supported();
1697 if (!(val
& E1000_RFCTL_NFSW_DIS
)) {
1698 trace_e1000e_wrn_nfsw_filtering_not_supported();
1701 if (!(val
& E1000_RFCTL_NFSR_DIS
)) {
1702 trace_e1000e_wrn_nfsr_filtering_not_supported();
1705 core
->mac
[RFCTL
] = val
;
1709 igb_calc_rxdesclen(IGBCore
*core
)
1711 if (igb_rx_use_legacy_descriptor(core
)) {
1712 core
->rx_desc_len
= sizeof(struct e1000_rx_desc
);
1714 core
->rx_desc_len
= sizeof(union e1000_adv_rx_desc
);
1716 trace_e1000e_rx_desc_len(core
->rx_desc_len
);
1720 igb_set_rx_control(IGBCore
*core
, int index
, uint32_t val
)
1722 core
->mac
[RCTL
] = val
;
1723 trace_e1000e_rx_set_rctl(core
->mac
[RCTL
]);
1725 if (val
& E1000_RCTL_DTYP_MASK
) {
1726 qemu_log_mask(LOG_GUEST_ERROR
,
1727 "igb: RCTL.DTYP must be zero for compatibility");
1730 if (val
& E1000_RCTL_EN
) {
1731 igb_calc_rxdesclen(core
);
1732 igb_start_recv(core
);
1737 igb_clear_ims_bits(IGBCore
*core
, uint32_t bits
)
1739 trace_e1000e_irq_clear_ims(bits
, core
->mac
[IMS
], core
->mac
[IMS
] & ~bits
);
1740 core
->mac
[IMS
] &= ~bits
;
1744 igb_postpone_interrupt(IGBIntrDelayTimer
*timer
)
1746 if (timer
->running
) {
1747 trace_e1000e_irq_postponed_by_xitr(timer
->delay_reg
<< 2);
1752 if (timer
->core
->mac
[timer
->delay_reg
] != 0) {
1753 igb_intrmgr_rearm_timer(timer
);
1760 igb_eitr_should_postpone(IGBCore
*core
, int idx
)
1762 return igb_postpone_interrupt(&core
->eitr
[idx
]);
1765 static void igb_send_msix(IGBCore
*core
)
1767 uint32_t causes
= core
->mac
[EICR
] & core
->mac
[EIMS
];
1768 uint32_t effective_eiac
;
1771 for (vector
= 0; vector
< IGB_INTR_NUM
; ++vector
) {
1772 if ((causes
& BIT(vector
)) && !igb_eitr_should_postpone(core
, vector
)) {
1774 trace_e1000e_irq_msix_notify_vec(vector
);
1775 igb_msix_notify(core
, vector
);
1777 trace_e1000e_irq_icr_clear_eiac(core
->mac
[EICR
], core
->mac
[EIAC
]);
1778 effective_eiac
= core
->mac
[EIAC
] & BIT(vector
);
1779 core
->mac
[EICR
] &= ~effective_eiac
;
1785 igb_fix_icr_asserted(IGBCore
*core
)
1787 core
->mac
[ICR
] &= ~E1000_ICR_ASSERTED
;
1788 if (core
->mac
[ICR
]) {
1789 core
->mac
[ICR
] |= E1000_ICR_ASSERTED
;
1792 trace_e1000e_irq_fix_icr_asserted(core
->mac
[ICR
]);
1796 igb_update_interrupt_state(IGBCore
*core
)
1802 icr
= core
->mac
[ICR
] & core
->mac
[IMS
];
1804 if (msix_enabled(core
->owner
)) {
1807 if (icr
& E1000_ICR_DRSTA
) {
1808 int_alloc
= core
->mac
[IVAR_MISC
] & 0xff;
1809 if (int_alloc
& E1000_IVAR_VALID
) {
1810 causes
|= BIT(int_alloc
& 0x1f);
1813 /* Check if other bits (excluding the TCP Timer) are enabled. */
1814 if (icr
& ~E1000_ICR_DRSTA
) {
1815 int_alloc
= (core
->mac
[IVAR_MISC
] >> 8) & 0xff;
1816 if (int_alloc
& E1000_IVAR_VALID
) {
1817 causes
|= BIT(int_alloc
& 0x1f);
1819 trace_e1000e_irq_add_msi_other(core
->mac
[EICR
]);
1821 core
->mac
[EICR
] |= causes
;
1824 if ((core
->mac
[EICR
] & core
->mac
[EIMS
])) {
1825 igb_send_msix(core
);
1828 igb_fix_icr_asserted(core
);
1831 core
->mac
[EICR
] |= (icr
& E1000_ICR_DRSTA
) | E1000_EICR_OTHER
;
1833 core
->mac
[EICR
] &= ~E1000_EICR_OTHER
;
1836 trace_e1000e_irq_pending_interrupts(core
->mac
[ICR
] & core
->mac
[IMS
],
1837 core
->mac
[ICR
], core
->mac
[IMS
]);
1839 if (msi_enabled(core
->owner
)) {
1841 msi_notify(core
->owner
, 0);
1845 igb_raise_legacy_irq(core
);
1847 igb_lower_legacy_irq(core
);
1854 igb_set_interrupt_cause(IGBCore
*core
, uint32_t val
)
1856 trace_e1000e_irq_set_cause_entry(val
, core
->mac
[ICR
]);
1858 core
->mac
[ICR
] |= val
;
1860 trace_e1000e_irq_set_cause_exit(val
, core
->mac
[ICR
]);
1862 igb_update_interrupt_state(core
);
1865 static void igb_set_eics(IGBCore
*core
, int index
, uint32_t val
)
1867 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
1869 trace_igb_irq_write_eics(val
, msix
);
1872 val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
);
1875 * TODO: Move to igb_update_interrupt_state if EICS is modified in other
1878 core
->mac
[EICR
] = core
->mac
[EICS
];
1880 igb_update_interrupt_state(core
);
1883 static void igb_set_eims(IGBCore
*core
, int index
, uint32_t val
)
1885 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
1887 trace_igb_irq_write_eims(val
, msix
);
1890 val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
);
1892 igb_update_interrupt_state(core
);
1895 static void igb_vf_reset(IGBCore
*core
, uint16_t vfn
)
1897 /* TODO: Reset of the queue enable and the interrupt registers of the VF. */
1899 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_RSTI
;
1900 core
->mac
[V2PMAILBOX0
+ vfn
] = E1000_V2PMAILBOX_RSTD
;
1903 static void mailbox_interrupt_to_vf(IGBCore
*core
, uint16_t vfn
)
1905 uint32_t ent
= core
->mac
[VTIVAR_MISC
+ vfn
];
1907 if ((ent
& E1000_IVAR_VALID
)) {
1908 core
->mac
[EICR
] |= (ent
& 0x3) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
);
1909 igb_update_interrupt_state(core
);
1913 static void mailbox_interrupt_to_pf(IGBCore
*core
)
1915 igb_set_interrupt_cause(core
, E1000_ICR_VMMB
);
1918 static void igb_set_pfmailbox(IGBCore
*core
, int index
, uint32_t val
)
1920 uint16_t vfn
= index
- P2VMAILBOX0
;
1922 trace_igb_set_pfmailbox(vfn
, val
);
1924 if (val
& E1000_P2VMAILBOX_STS
) {
1925 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFSTS
;
1926 mailbox_interrupt_to_vf(core
, vfn
);
1929 if (val
& E1000_P2VMAILBOX_ACK
) {
1930 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFACK
;
1931 mailbox_interrupt_to_vf(core
, vfn
);
1934 /* Buffer Taken by PF (can be set only if the VFU is cleared). */
1935 if (val
& E1000_P2VMAILBOX_PFU
) {
1936 if (!(core
->mac
[index
] & E1000_P2VMAILBOX_VFU
)) {
1937 core
->mac
[index
] |= E1000_P2VMAILBOX_PFU
;
1938 core
->mac
[V2PMAILBOX0
+ vfn
] |= E1000_V2PMAILBOX_PFU
;
1941 core
->mac
[index
] &= ~E1000_P2VMAILBOX_PFU
;
1942 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_PFU
;
1945 if (val
& E1000_P2VMAILBOX_RVFU
) {
1946 core
->mac
[V2PMAILBOX0
+ vfn
] &= ~E1000_V2PMAILBOX_VFU
;
1947 core
->mac
[MBVFICR
] &= ~((E1000_MBVFICR_VFACK_VF1
<< vfn
) |
1948 (E1000_MBVFICR_VFREQ_VF1
<< vfn
));
1952 static void igb_set_vfmailbox(IGBCore
*core
, int index
, uint32_t val
)
1954 uint16_t vfn
= index
- V2PMAILBOX0
;
1956 trace_igb_set_vfmailbox(vfn
, val
);
1958 if (val
& E1000_V2PMAILBOX_REQ
) {
1959 core
->mac
[MBVFICR
] |= E1000_MBVFICR_VFREQ_VF1
<< vfn
;
1960 mailbox_interrupt_to_pf(core
);
1963 if (val
& E1000_V2PMAILBOX_ACK
) {
1964 core
->mac
[MBVFICR
] |= E1000_MBVFICR_VFACK_VF1
<< vfn
;
1965 mailbox_interrupt_to_pf(core
);
1968 /* Buffer Taken by VF (can be set only if the PFU is cleared). */
1969 if (val
& E1000_V2PMAILBOX_VFU
) {
1970 if (!(core
->mac
[index
] & E1000_V2PMAILBOX_PFU
)) {
1971 core
->mac
[index
] |= E1000_V2PMAILBOX_VFU
;
1972 core
->mac
[P2VMAILBOX0
+ vfn
] |= E1000_P2VMAILBOX_VFU
;
1975 core
->mac
[index
] &= ~E1000_V2PMAILBOX_VFU
;
1976 core
->mac
[P2VMAILBOX0
+ vfn
] &= ~E1000_P2VMAILBOX_VFU
;
1980 static void igb_w1c(IGBCore
*core
, int index
, uint32_t val
)
1982 core
->mac
[index
] &= ~val
;
1985 static void igb_set_eimc(IGBCore
*core
, int index
, uint32_t val
)
1987 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
1989 /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
1991 ~(val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
));
1993 trace_igb_irq_write_eimc(val
, core
->mac
[EIMS
], msix
);
1994 igb_update_interrupt_state(core
);
1997 static void igb_set_eiac(IGBCore
*core
, int index
, uint32_t val
)
1999 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2002 trace_igb_irq_write_eiac(val
);
2005 * TODO: When using IOV, the bits that correspond to MSI-X vectors
2006 * that are assigned to a VF are read-only.
2008 core
->mac
[EIAC
] |= (val
& E1000_EICR_MSIX_MASK
);
2012 static void igb_set_eiam(IGBCore
*core
, int index
, uint32_t val
)
2014 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2017 * TODO: When using IOV, the bits that correspond to MSI-X vectors that
2018 * are assigned to a VF are read-only.
2021 ~(val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
));
2023 trace_igb_irq_write_eiam(val
, msix
);
2026 static void igb_set_eicr(IGBCore
*core
, int index
, uint32_t val
)
2028 bool msix
= !!(core
->mac
[GPIE
] & E1000_GPIE_MSIX_MODE
);
2031 * TODO: In IOV mode, only bit zero of this vector is available for the PF
2035 ~(val
& (msix
? E1000_EICR_MSIX_MASK
: E1000_EICR_LEGACY_MASK
));
2037 trace_igb_irq_write_eicr(val
, msix
);
2038 igb_update_interrupt_state(core
);
2041 static void igb_set_vtctrl(IGBCore
*core
, int index
, uint32_t val
)
2045 if (val
& E1000_CTRL_RST
) {
2046 vfn
= (index
- PVTCTRL0
) / 0x40;
2047 igb_vf_reset(core
, vfn
);
2051 static void igb_set_vteics(IGBCore
*core
, int index
, uint32_t val
)
2053 uint16_t vfn
= (index
- PVTEICS0
) / 0x40;
2055 core
->mac
[index
] = val
;
2056 igb_set_eics(core
, EICS
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2059 static void igb_set_vteims(IGBCore
*core
, int index
, uint32_t val
)
2061 uint16_t vfn
= (index
- PVTEIMS0
) / 0x40;
2063 core
->mac
[index
] = val
;
2064 igb_set_eims(core
, EIMS
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2067 static void igb_set_vteimc(IGBCore
*core
, int index
, uint32_t val
)
2069 uint16_t vfn
= (index
- PVTEIMC0
) / 0x40;
2071 core
->mac
[index
] = val
;
2072 igb_set_eimc(core
, EIMC
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2075 static void igb_set_vteiac(IGBCore
*core
, int index
, uint32_t val
)
2077 uint16_t vfn
= (index
- PVTEIAC0
) / 0x40;
2079 core
->mac
[index
] = val
;
2080 igb_set_eiac(core
, EIAC
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2083 static void igb_set_vteiam(IGBCore
*core
, int index
, uint32_t val
)
2085 uint16_t vfn
= (index
- PVTEIAM0
) / 0x40;
2087 core
->mac
[index
] = val
;
2088 igb_set_eiam(core
, EIAM
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2091 static void igb_set_vteicr(IGBCore
*core
, int index
, uint32_t val
)
2093 uint16_t vfn
= (index
- PVTEICR0
) / 0x40;
2095 core
->mac
[index
] = val
;
2096 igb_set_eicr(core
, EICR
, (val
& 0x7) << (22 - vfn
* IGBVF_MSIX_VEC_NUM
));
2099 static void igb_set_vtivar(IGBCore
*core
, int index
, uint32_t val
)
2101 uint16_t vfn
= (index
- VTIVAR
);
2106 core
->mac
[index
] = val
;
2108 /* Get assigned vector associated with queue Rx#0. */
2109 if ((val
& E1000_IVAR_VALID
)) {
2110 n
= igb_ivar_entry_rx(qn
);
2111 ent
= E1000_IVAR_VALID
| (24 - vfn
* IGBVF_MSIX_VEC_NUM
- (2 - (val
& 0x7)));
2112 core
->mac
[IVAR0
+ n
/ 4] |= ent
<< 8 * (n
% 4);
2115 /* Get assigned vector associated with queue Tx#0 */
2117 if ((ent
& E1000_IVAR_VALID
)) {
2118 n
= igb_ivar_entry_tx(qn
);
2119 ent
= E1000_IVAR_VALID
| (24 - vfn
* IGBVF_MSIX_VEC_NUM
- (2 - (ent
& 0x7)));
2120 core
->mac
[IVAR0
+ n
/ 4] |= ent
<< 8 * (n
% 4);
2124 * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
2129 igb_autoneg_timer(void *opaque
)
2131 IGBCore
*core
= opaque
;
2132 if (!qemu_get_queue(core
->owner_nic
)->link_down
) {
2133 e1000x_update_regs_on_autoneg_done(core
->mac
, core
->phy
);
2134 igb_start_recv(core
);
2136 igb_update_flowctl_status(core
);
2137 /* signal link status change to the guest */
2138 igb_set_interrupt_cause(core
, E1000_ICR_LSC
);
2142 static inline uint16_t
2143 igb_get_reg_index_with_offset(const uint16_t *mac_reg_access
, hwaddr addr
)
2145 uint16_t index
= (addr
& 0x1ffff) >> 2;
2146 return index
+ (mac_reg_access
[index
] & 0xfffe);
2149 static const char igb_phy_regcap
[MAX_PHY_REG_ADDRESS
+ 1] = {
2150 [MII_BMCR
] = PHY_RW
,
2152 [MII_PHYID1
] = PHY_R
,
2153 [MII_PHYID2
] = PHY_R
,
2154 [MII_ANAR
] = PHY_RW
,
2155 [MII_ANLPAR
] = PHY_R
,
2157 [MII_ANNP
] = PHY_RW
,
2158 [MII_ANLPRNP
] = PHY_R
,
2159 [MII_CTRL1000
] = PHY_RW
,
2160 [MII_STAT1000
] = PHY_R
,
2161 [MII_EXTSTAT
] = PHY_R
,
2163 [IGP01E1000_PHY_PORT_CONFIG
] = PHY_RW
,
2164 [IGP01E1000_PHY_PORT_STATUS
] = PHY_R
,
2165 [IGP01E1000_PHY_PORT_CTRL
] = PHY_RW
,
2166 [IGP01E1000_PHY_LINK_HEALTH
] = PHY_R
,
2167 [IGP02E1000_PHY_POWER_MGMT
] = PHY_RW
,
2168 [IGP01E1000_PHY_PAGE_SELECT
] = PHY_W
2172 igb_phy_reg_write(IGBCore
*core
, uint32_t addr
, uint16_t data
)
2174 assert(addr
<= MAX_PHY_REG_ADDRESS
);
2176 if (addr
== MII_BMCR
) {
2177 igb_set_phy_ctrl(core
, data
);
2179 core
->phy
[addr
] = data
;
2184 igb_set_mdic(IGBCore
*core
, int index
, uint32_t val
)
2186 uint32_t data
= val
& E1000_MDIC_DATA_MASK
;
2187 uint32_t addr
= ((val
& E1000_MDIC_REG_MASK
) >> E1000_MDIC_REG_SHIFT
);
2189 if ((val
& E1000_MDIC_PHY_MASK
) >> E1000_MDIC_PHY_SHIFT
!= 1) { /* phy # */
2190 val
= core
->mac
[MDIC
] | E1000_MDIC_ERROR
;
2191 } else if (val
& E1000_MDIC_OP_READ
) {
2192 if (!(igb_phy_regcap
[addr
] & PHY_R
)) {
2193 trace_igb_core_mdic_read_unhandled(addr
);
2194 val
|= E1000_MDIC_ERROR
;
2196 val
= (val
^ data
) | core
->phy
[addr
];
2197 trace_igb_core_mdic_read(addr
, val
);
2199 } else if (val
& E1000_MDIC_OP_WRITE
) {
2200 if (!(igb_phy_regcap
[addr
] & PHY_W
)) {
2201 trace_igb_core_mdic_write_unhandled(addr
);
2202 val
|= E1000_MDIC_ERROR
;
2204 trace_igb_core_mdic_write(addr
, data
);
2205 igb_phy_reg_write(core
, addr
, data
);
2208 core
->mac
[MDIC
] = val
| E1000_MDIC_READY
;
2210 if (val
& E1000_MDIC_INT_EN
) {
2211 igb_set_interrupt_cause(core
, E1000_ICR_MDAC
);
2216 igb_set_rdt(IGBCore
*core
, int index
, uint32_t val
)
2218 core
->mac
[index
] = val
& 0xffff;
2219 trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0
, index
), val
);
2220 igb_start_recv(core
);
2224 igb_set_status(IGBCore
*core
, int index
, uint32_t val
)
2226 if ((val
& E1000_STATUS_PHYRA
) == 0) {
2227 core
->mac
[index
] &= ~E1000_STATUS_PHYRA
;
2232 igb_set_ctrlext(IGBCore
*core
, int index
, uint32_t val
)
2234 trace_e1000e_link_set_ext_params(!!(val
& E1000_CTRL_EXT_ASDCHK
),
2235 !!(val
& E1000_CTRL_EXT_SPD_BYPS
));
2239 /* Zero self-clearing bits */
2240 val
&= ~(E1000_CTRL_EXT_ASDCHK
| E1000_CTRL_EXT_EE_RST
);
2241 core
->mac
[CTRL_EXT
] = val
;
2245 igb_set_pbaclr(IGBCore
*core
, int index
, uint32_t val
)
2249 core
->mac
[PBACLR
] = val
& E1000_PBACLR_VALID_MASK
;
2251 if (!msix_enabled(core
->owner
)) {
2255 for (i
= 0; i
< IGB_INTR_NUM
; i
++) {
2256 if (core
->mac
[PBACLR
] & BIT(i
)) {
2257 msix_clr_pending(core
->owner
, i
);
2263 igb_set_fcrth(IGBCore
*core
, int index
, uint32_t val
)
2265 core
->mac
[FCRTH
] = val
& 0xFFF8;
2269 igb_set_fcrtl(IGBCore
*core
, int index
, uint32_t val
)
2271 core
->mac
[FCRTL
] = val
& 0x8000FFF8;
2274 #define IGB_LOW_BITS_SET_FUNC(num) \
2276 igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
2278 core->mac[index] = val & (BIT(num) - 1); \
2281 IGB_LOW_BITS_SET_FUNC(4)
2282 IGB_LOW_BITS_SET_FUNC(13)
2283 IGB_LOW_BITS_SET_FUNC(16)
2286 igb_set_dlen(IGBCore
*core
, int index
, uint32_t val
)
2288 core
->mac
[index
] = val
& 0xffff0;
2292 igb_set_dbal(IGBCore
*core
, int index
, uint32_t val
)
2294 core
->mac
[index
] = val
& E1000_XDBAL_MASK
;
2298 igb_set_tdt(IGBCore
*core
, int index
, uint32_t val
)
2301 int qn
= igb_mq_queue_idx(TDT0
, index
);
2303 core
->mac
[index
] = val
& 0xffff;
2305 igb_tx_ring_init(core
, &txr
, qn
);
2306 igb_start_xmit(core
, &txr
);
2310 igb_set_ics(IGBCore
*core
, int index
, uint32_t val
)
2312 trace_e1000e_irq_write_ics(val
);
2313 igb_set_interrupt_cause(core
, val
);
2317 igb_set_imc(IGBCore
*core
, int index
, uint32_t val
)
2319 trace_e1000e_irq_ims_clear_set_imc(val
);
2320 igb_clear_ims_bits(core
, val
);
2321 igb_update_interrupt_state(core
);
2325 igb_set_ims(IGBCore
*core
, int index
, uint32_t val
)
2327 uint32_t valid_val
= val
& 0x77D4FBFD;
2329 trace_e1000e_irq_set_ims(val
, core
->mac
[IMS
], core
->mac
[IMS
] | valid_val
);
2330 core
->mac
[IMS
] |= valid_val
;
2331 igb_update_interrupt_state(core
);
2334 static void igb_commit_icr(IGBCore
*core
)
2337 * If GPIE.NSICR = 0, then the copy of IAM to IMS will occur only if at
2338 * least one bit is set in the IMS and there is a true interrupt as
2339 * reflected in ICR.INTA.
2341 if ((core
->mac
[GPIE
] & E1000_GPIE_NSICR
) ||
2342 (core
->mac
[IMS
] && (core
->mac
[ICR
] & E1000_ICR_INT_ASSERTED
))) {
2343 igb_set_ims(core
, IMS
, core
->mac
[IAM
]);
2345 igb_update_interrupt_state(core
);
2349 static void igb_set_icr(IGBCore
*core
, int index
, uint32_t val
)
2351 uint32_t icr
= core
->mac
[ICR
] & ~val
;
2353 trace_igb_irq_icr_write(val
, core
->mac
[ICR
], icr
);
2354 core
->mac
[ICR
] = icr
;
2355 igb_commit_icr(core
);
2359 igb_mac_readreg(IGBCore
*core
, int index
)
2361 return core
->mac
[index
];
2365 igb_mac_ics_read(IGBCore
*core
, int index
)
2367 trace_e1000e_irq_read_ics(core
->mac
[ICS
]);
2368 return core
->mac
[ICS
];
2372 igb_mac_ims_read(IGBCore
*core
, int index
)
2374 trace_e1000e_irq_read_ims(core
->mac
[IMS
]);
2375 return core
->mac
[IMS
];
2379 igb_mac_swsm_read(IGBCore
*core
, int index
)
2381 uint32_t val
= core
->mac
[SWSM
];
2382 core
->mac
[SWSM
] = val
| E1000_SWSM_SMBI
;
2387 igb_mac_eitr_read(IGBCore
*core
, int index
)
2389 return core
->eitr_guest_value
[index
- EITR0
];
2392 static uint32_t igb_mac_vfmailbox_read(IGBCore
*core
, int index
)
2394 uint32_t val
= core
->mac
[index
];
2396 core
->mac
[index
] &= ~(E1000_V2PMAILBOX_PFSTS
| E1000_V2PMAILBOX_PFACK
|
2397 E1000_V2PMAILBOX_RSTD
);
2403 igb_mac_icr_read(IGBCore
*core
, int index
)
2405 uint32_t ret
= core
->mac
[ICR
];
2406 trace_e1000e_irq_icr_read_entry(ret
);
2408 if (core
->mac
[GPIE
] & E1000_GPIE_NSICR
) {
2409 trace_igb_irq_icr_clear_gpie_nsicr();
2411 } else if (core
->mac
[IMS
] == 0) {
2412 trace_e1000e_irq_icr_clear_zero_ims();
2414 } else if (!msix_enabled(core
->owner
)) {
2415 trace_e1000e_irq_icr_clear_nonmsix_icr_read();
2419 trace_e1000e_irq_icr_read_exit(core
->mac
[ICR
]);
2420 igb_commit_icr(core
);
2425 igb_mac_read_clr4(IGBCore
*core
, int index
)
2427 uint32_t ret
= core
->mac
[index
];
2429 core
->mac
[index
] = 0;
2434 igb_mac_read_clr8(IGBCore
*core
, int index
)
2436 uint32_t ret
= core
->mac
[index
];
2438 core
->mac
[index
] = 0;
2439 core
->mac
[index
- 1] = 0;
2444 igb_get_ctrl(IGBCore
*core
, int index
)
2446 uint32_t val
= core
->mac
[CTRL
];
2448 trace_e1000e_link_read_params(
2449 !!(val
& E1000_CTRL_ASDE
),
2450 (val
& E1000_CTRL_SPD_SEL
) >> E1000_CTRL_SPD_SHIFT
,
2451 !!(val
& E1000_CTRL_FRCSPD
),
2452 !!(val
& E1000_CTRL_FRCDPX
),
2453 !!(val
& E1000_CTRL_RFCE
),
2454 !!(val
& E1000_CTRL_TFCE
));
2459 static uint32_t igb_get_status(IGBCore
*core
, int index
)
2461 uint32_t res
= core
->mac
[STATUS
];
2462 uint16_t num_vfs
= pcie_sriov_num_vfs(core
->owner
);
2464 if (core
->mac
[CTRL
] & E1000_CTRL_FRCDPX
) {
2465 res
|= (core
->mac
[CTRL
] & E1000_CTRL_FD
) ? E1000_STATUS_FD
: 0;
2467 res
|= E1000_STATUS_FD
;
2470 if ((core
->mac
[CTRL
] & E1000_CTRL_FRCSPD
) ||
2471 (core
->mac
[CTRL_EXT
] & E1000_CTRL_EXT_SPD_BYPS
)) {
2472 switch (core
->mac
[CTRL
] & E1000_CTRL_SPD_SEL
) {
2473 case E1000_CTRL_SPD_10
:
2474 res
|= E1000_STATUS_SPEED_10
;
2476 case E1000_CTRL_SPD_100
:
2477 res
|= E1000_STATUS_SPEED_100
;
2479 case E1000_CTRL_SPD_1000
:
2481 res
|= E1000_STATUS_SPEED_1000
;
2485 res
|= E1000_STATUS_SPEED_1000
;
2489 res
|= num_vfs
<< E1000_STATUS_NUM_VFS_SHIFT
;
2490 res
|= E1000_STATUS_IOV_MODE
;
2494 * Windows driver 12.18.9.23 resets if E1000_STATUS_GIO_MASTER_ENABLE is
2495 * left set after E1000_CTRL_LRST is set.
2497 if (!(core
->mac
[CTRL
] & E1000_CTRL_GIO_MASTER_DISABLE
) &&
2498 !(core
->mac
[CTRL
] & E1000_CTRL_LRST
)) {
2499 res
|= E1000_STATUS_GIO_MASTER_ENABLE
;
2506 igb_mac_writereg(IGBCore
*core
, int index
, uint32_t val
)
2508 core
->mac
[index
] = val
;
2512 igb_mac_setmacaddr(IGBCore
*core
, int index
, uint32_t val
)
2514 uint32_t macaddr
[2];
2516 core
->mac
[index
] = val
;
2518 macaddr
[0] = cpu_to_le32(core
->mac
[RA
]);
2519 macaddr
[1] = cpu_to_le32(core
->mac
[RA
+ 1]);
2520 qemu_format_nic_info_str(qemu_get_queue(core
->owner_nic
),
2521 (uint8_t *) macaddr
);
2523 trace_e1000e_mac_set_sw(MAC_ARG(macaddr
));
2527 igb_set_eecd(IGBCore
*core
, int index
, uint32_t val
)
2529 static const uint32_t ro_bits
= E1000_EECD_PRES
|
2530 E1000_EECD_AUTO_RD
|
2531 E1000_EECD_SIZE_EX_MASK
;
2533 core
->mac
[EECD
] = (core
->mac
[EECD
] & ro_bits
) | (val
& ~ro_bits
);
2537 igb_set_eerd(IGBCore
*core
, int index
, uint32_t val
)
2539 uint32_t addr
= (val
>> E1000_EERW_ADDR_SHIFT
) & E1000_EERW_ADDR_MASK
;
2543 if ((addr
< IGB_EEPROM_SIZE
) && (val
& E1000_EERW_START
)) {
2544 data
= core
->eeprom
[addr
];
2545 flags
= E1000_EERW_DONE
;
2548 core
->mac
[EERD
] = flags
|
2549 (addr
<< E1000_EERW_ADDR_SHIFT
) |
2550 (data
<< E1000_EERW_DATA_SHIFT
);
2554 igb_set_eitr(IGBCore
*core
, int index
, uint32_t val
)
2556 uint32_t eitr_num
= index
- EITR0
;
2558 trace_igb_irq_eitr_set(eitr_num
, val
);
2560 core
->eitr_guest_value
[eitr_num
] = val
& ~E1000_EITR_CNT_IGNR
;
2561 core
->mac
[index
] = val
& 0x7FFE;
2565 igb_update_rx_offloads(IGBCore
*core
)
2567 int cso_state
= igb_rx_l4_cso_enabled(core
);
2569 trace_e1000e_rx_set_cso(cso_state
);
2571 if (core
->has_vnet
) {
2572 qemu_set_offload(qemu_get_queue(core
->owner_nic
)->peer
,
2573 cso_state
, 0, 0, 0, 0);
2578 igb_set_rxcsum(IGBCore
*core
, int index
, uint32_t val
)
2580 core
->mac
[RXCSUM
] = val
;
2581 igb_update_rx_offloads(core
);
2585 igb_set_gcr(IGBCore
*core
, int index
, uint32_t val
)
2587 uint32_t ro_bits
= core
->mac
[GCR
] & E1000_GCR_RO_BITS
;
2588 core
->mac
[GCR
] = (val
& ~E1000_GCR_RO_BITS
) | ro_bits
;
2591 static uint32_t igb_get_systiml(IGBCore
*core
, int index
)
2593 e1000x_timestamp(core
->mac
, core
->timadj
, SYSTIML
, SYSTIMH
);
2594 return core
->mac
[SYSTIML
];
2597 static uint32_t igb_get_rxsatrh(IGBCore
*core
, int index
)
2599 core
->mac
[TSYNCRXCTL
] &= ~E1000_TSYNCRXCTL_VALID
;
2600 return core
->mac
[RXSATRH
];
2603 static uint32_t igb_get_txstmph(IGBCore
*core
, int index
)
2605 core
->mac
[TSYNCTXCTL
] &= ~E1000_TSYNCTXCTL_VALID
;
2606 return core
->mac
[TXSTMPH
];
2609 static void igb_set_timinca(IGBCore
*core
, int index
, uint32_t val
)
2611 e1000x_set_timinca(core
->mac
, &core
->timadj
, val
);
2614 static void igb_set_timadjh(IGBCore
*core
, int index
, uint32_t val
)
2616 core
->mac
[TIMADJH
] = val
;
2617 core
->timadj
+= core
->mac
[TIMADJL
] | ((int64_t)core
->mac
[TIMADJH
] << 32);
2620 #define igb_getreg(x) [x] = igb_mac_readreg
2621 typedef uint32_t (*readops
)(IGBCore
*, int);
2622 static const readops igb_macreg_readops
[] = {
2652 igb_getreg(RDBAH10
),
2653 igb_getreg(RDBAH11
),
2654 igb_getreg(RDBAH12
),
2655 igb_getreg(RDBAH13
),
2656 igb_getreg(RDBAH14
),
2657 igb_getreg(RDBAH15
),
2668 igb_getreg(TDBAL10
),
2669 igb_getreg(TDBAL11
),
2670 igb_getreg(TDBAL12
),
2671 igb_getreg(TDBAL13
),
2672 igb_getreg(TDBAL14
),
2673 igb_getreg(TDBAL15
),
2684 igb_getreg(RDLEN10
),
2685 igb_getreg(RDLEN11
),
2686 igb_getreg(RDLEN12
),
2687 igb_getreg(RDLEN13
),
2688 igb_getreg(RDLEN14
),
2689 igb_getreg(RDLEN15
),
2690 igb_getreg(SRRCTL0
),
2691 igb_getreg(SRRCTL1
),
2692 igb_getreg(SRRCTL2
),
2693 igb_getreg(SRRCTL3
),
2694 igb_getreg(SRRCTL4
),
2695 igb_getreg(SRRCTL5
),
2696 igb_getreg(SRRCTL6
),
2697 igb_getreg(SRRCTL7
),
2698 igb_getreg(SRRCTL8
),
2699 igb_getreg(SRRCTL9
),
2700 igb_getreg(SRRCTL10
),
2701 igb_getreg(SRRCTL11
),
2702 igb_getreg(SRRCTL12
),
2703 igb_getreg(SRRCTL13
),
2704 igb_getreg(SRRCTL14
),
2705 igb_getreg(SRRCTL15
),
2706 igb_getreg(LATECOL
),
2730 igb_getreg(RXSTMPH
),
2731 igb_getreg(TXSTMPL
),
2732 igb_getreg(TIMADJL
),
2770 igb_getreg(FLMNGCTL
),
2771 igb_getreg(FLMNGCNT
),
2772 igb_getreg(TSYNCTXCTL
),
2773 igb_getreg(EEMNGDATA
),
2774 igb_getreg(CTRL_EXT
),
2775 igb_getreg(SYSTIMH
),
2776 igb_getreg(EEMNGCTL
),
2777 igb_getreg(FLMNGDATA
),
2778 igb_getreg(TSYNCRXCTL
),
2781 igb_getreg(TCTL_EXT
),
2803 igb_getreg(XOFFTXC
),
2807 igb_getreg(TIMINCA
),
2813 igb_getreg(RXSATRL
),
2825 igb_getreg(TDLEN10
),
2826 igb_getreg(TDLEN11
),
2827 igb_getreg(TDLEN12
),
2828 igb_getreg(TDLEN13
),
2829 igb_getreg(TDLEN14
),
2830 igb_getreg(TDLEN15
),
2835 igb_getreg(TXDCTL0
),
2836 igb_getreg(TXDCTL1
),
2837 igb_getreg(TXDCTL2
),
2838 igb_getreg(TXDCTL3
),
2839 igb_getreg(TXDCTL4
),
2840 igb_getreg(TXDCTL5
),
2841 igb_getreg(TXDCTL6
),
2842 igb_getreg(TXDCTL7
),
2843 igb_getreg(TXDCTL8
),
2844 igb_getreg(TXDCTL9
),
2845 igb_getreg(TXDCTL10
),
2846 igb_getreg(TXDCTL11
),
2847 igb_getreg(TXDCTL12
),
2848 igb_getreg(TXDCTL13
),
2849 igb_getreg(TXDCTL14
),
2850 igb_getreg(TXDCTL15
),
2861 igb_getreg(TXCTL10
),
2862 igb_getreg(TXCTL11
),
2863 igb_getreg(TXCTL12
),
2864 igb_getreg(TXCTL13
),
2865 igb_getreg(TXCTL14
),
2866 igb_getreg(TXCTL15
),
2867 igb_getreg(TDWBAL0
),
2868 igb_getreg(TDWBAL1
),
2869 igb_getreg(TDWBAL2
),
2870 igb_getreg(TDWBAL3
),
2871 igb_getreg(TDWBAL4
),
2872 igb_getreg(TDWBAL5
),
2873 igb_getreg(TDWBAL6
),
2874 igb_getreg(TDWBAL7
),
2875 igb_getreg(TDWBAL8
),
2876 igb_getreg(TDWBAL9
),
2877 igb_getreg(TDWBAL10
),
2878 igb_getreg(TDWBAL11
),
2879 igb_getreg(TDWBAL12
),
2880 igb_getreg(TDWBAL13
),
2881 igb_getreg(TDWBAL14
),
2882 igb_getreg(TDWBAL15
),
2883 igb_getreg(TDWBAH0
),
2884 igb_getreg(TDWBAH1
),
2885 igb_getreg(TDWBAH2
),
2886 igb_getreg(TDWBAH3
),
2887 igb_getreg(TDWBAH4
),
2888 igb_getreg(TDWBAH5
),
2889 igb_getreg(TDWBAH6
),
2890 igb_getreg(TDWBAH7
),
2891 igb_getreg(TDWBAH8
),
2892 igb_getreg(TDWBAH9
),
2893 igb_getreg(TDWBAH10
),
2894 igb_getreg(TDWBAH11
),
2895 igb_getreg(TDWBAH12
),
2896 igb_getreg(TDWBAH13
),
2897 igb_getreg(TDWBAH14
),
2898 igb_getreg(TDWBAH15
),
2899 igb_getreg(PVTCTRL0
),
2900 igb_getreg(PVTCTRL1
),
2901 igb_getreg(PVTCTRL2
),
2902 igb_getreg(PVTCTRL3
),
2903 igb_getreg(PVTCTRL4
),
2904 igb_getreg(PVTCTRL5
),
2905 igb_getreg(PVTCTRL6
),
2906 igb_getreg(PVTCTRL7
),
2907 igb_getreg(PVTEIMS0
),
2908 igb_getreg(PVTEIMS1
),
2909 igb_getreg(PVTEIMS2
),
2910 igb_getreg(PVTEIMS3
),
2911 igb_getreg(PVTEIMS4
),
2912 igb_getreg(PVTEIMS5
),
2913 igb_getreg(PVTEIMS6
),
2914 igb_getreg(PVTEIMS7
),
2915 igb_getreg(PVTEIAC0
),
2916 igb_getreg(PVTEIAC1
),
2917 igb_getreg(PVTEIAC2
),
2918 igb_getreg(PVTEIAC3
),
2919 igb_getreg(PVTEIAC4
),
2920 igb_getreg(PVTEIAC5
),
2921 igb_getreg(PVTEIAC6
),
2922 igb_getreg(PVTEIAC7
),
2923 igb_getreg(PVTEIAM0
),
2924 igb_getreg(PVTEIAM1
),
2925 igb_getreg(PVTEIAM2
),
2926 igb_getreg(PVTEIAM3
),
2927 igb_getreg(PVTEIAM4
),
2928 igb_getreg(PVTEIAM5
),
2929 igb_getreg(PVTEIAM6
),
2930 igb_getreg(PVTEIAM7
),
2931 igb_getreg(PVFGPRC0
),
2932 igb_getreg(PVFGPRC1
),
2933 igb_getreg(PVFGPRC2
),
2934 igb_getreg(PVFGPRC3
),
2935 igb_getreg(PVFGPRC4
),
2936 igb_getreg(PVFGPRC5
),
2937 igb_getreg(PVFGPRC6
),
2938 igb_getreg(PVFGPRC7
),
2939 igb_getreg(PVFGPTC0
),
2940 igb_getreg(PVFGPTC1
),
2941 igb_getreg(PVFGPTC2
),
2942 igb_getreg(PVFGPTC3
),
2943 igb_getreg(PVFGPTC4
),
2944 igb_getreg(PVFGPTC5
),
2945 igb_getreg(PVFGPTC6
),
2946 igb_getreg(PVFGPTC7
),
2947 igb_getreg(PVFGORC0
),
2948 igb_getreg(PVFGORC1
),
2949 igb_getreg(PVFGORC2
),
2950 igb_getreg(PVFGORC3
),
2951 igb_getreg(PVFGORC4
),
2952 igb_getreg(PVFGORC5
),
2953 igb_getreg(PVFGORC6
),
2954 igb_getreg(PVFGORC7
),
2955 igb_getreg(PVFGOTC0
),
2956 igb_getreg(PVFGOTC1
),
2957 igb_getreg(PVFGOTC2
),
2958 igb_getreg(PVFGOTC3
),
2959 igb_getreg(PVFGOTC4
),
2960 igb_getreg(PVFGOTC5
),
2961 igb_getreg(PVFGOTC6
),
2962 igb_getreg(PVFGOTC7
),
2963 igb_getreg(PVFMPRC0
),
2964 igb_getreg(PVFMPRC1
),
2965 igb_getreg(PVFMPRC2
),
2966 igb_getreg(PVFMPRC3
),
2967 igb_getreg(PVFMPRC4
),
2968 igb_getreg(PVFMPRC5
),
2969 igb_getreg(PVFMPRC6
),
2970 igb_getreg(PVFMPRC7
),
2971 igb_getreg(PVFGPRLBC0
),
2972 igb_getreg(PVFGPRLBC1
),
2973 igb_getreg(PVFGPRLBC2
),
2974 igb_getreg(PVFGPRLBC3
),
2975 igb_getreg(PVFGPRLBC4
),
2976 igb_getreg(PVFGPRLBC5
),
2977 igb_getreg(PVFGPRLBC6
),
2978 igb_getreg(PVFGPRLBC7
),
2979 igb_getreg(PVFGPTLBC0
),
2980 igb_getreg(PVFGPTLBC1
),
2981 igb_getreg(PVFGPTLBC2
),
2982 igb_getreg(PVFGPTLBC3
),
2983 igb_getreg(PVFGPTLBC4
),
2984 igb_getreg(PVFGPTLBC5
),
2985 igb_getreg(PVFGPTLBC6
),
2986 igb_getreg(PVFGPTLBC7
),
2987 igb_getreg(PVFGORLBC0
),
2988 igb_getreg(PVFGORLBC1
),
2989 igb_getreg(PVFGORLBC2
),
2990 igb_getreg(PVFGORLBC3
),
2991 igb_getreg(PVFGORLBC4
),
2992 igb_getreg(PVFGORLBC5
),
2993 igb_getreg(PVFGORLBC6
),
2994 igb_getreg(PVFGORLBC7
),
2995 igb_getreg(PVFGOTLBC0
),
2996 igb_getreg(PVFGOTLBC1
),
2997 igb_getreg(PVFGOTLBC2
),
2998 igb_getreg(PVFGOTLBC3
),
2999 igb_getreg(PVFGOTLBC4
),
3000 igb_getreg(PVFGOTLBC5
),
3001 igb_getreg(PVFGOTLBC6
),
3002 igb_getreg(PVFGOTLBC7
),
3017 igb_getreg(RDBAL10
),
3018 igb_getreg(RDBAL11
),
3019 igb_getreg(RDBAL12
),
3020 igb_getreg(RDBAL13
),
3021 igb_getreg(RDBAL14
),
3022 igb_getreg(RDBAL15
),
3033 igb_getreg(TDBAH10
),
3034 igb_getreg(TDBAH11
),
3035 igb_getreg(TDBAH12
),
3036 igb_getreg(TDBAH13
),
3037 igb_getreg(TDBAH14
),
3038 igb_getreg(TDBAH15
),
3041 igb_getreg(XOFFRXC
),
3047 igb_getreg(FUNCTAG
),
3053 igb_getreg(RXDCTL0
),
3054 igb_getreg(RXDCTL1
),
3055 igb_getreg(RXDCTL2
),
3056 igb_getreg(RXDCTL3
),
3057 igb_getreg(RXDCTL4
),
3058 igb_getreg(RXDCTL5
),
3059 igb_getreg(RXDCTL6
),
3060 igb_getreg(RXDCTL7
),
3061 igb_getreg(RXDCTL8
),
3062 igb_getreg(RXDCTL9
),
3063 igb_getreg(RXDCTL10
),
3064 igb_getreg(RXDCTL11
),
3065 igb_getreg(RXDCTL12
),
3066 igb_getreg(RXDCTL13
),
3067 igb_getreg(RXDCTL14
),
3068 igb_getreg(RXDCTL15
),
3069 igb_getreg(RXSTMPL
),
3070 igb_getreg(TIMADJH
),
3080 [TOTH
] = igb_mac_read_clr8
,
3081 [GOTCH
] = igb_mac_read_clr8
,
3082 [PRC64
] = igb_mac_read_clr4
,
3083 [PRC255
] = igb_mac_read_clr4
,
3084 [PRC1023
] = igb_mac_read_clr4
,
3085 [PTC64
] = igb_mac_read_clr4
,
3086 [PTC255
] = igb_mac_read_clr4
,
3087 [PTC1023
] = igb_mac_read_clr4
,
3088 [GPRC
] = igb_mac_read_clr4
,
3089 [TPT
] = igb_mac_read_clr4
,
3090 [RUC
] = igb_mac_read_clr4
,
3091 [BPRC
] = igb_mac_read_clr4
,
3092 [MPTC
] = igb_mac_read_clr4
,
3093 [IAC
] = igb_mac_read_clr4
,
3094 [ICR
] = igb_mac_icr_read
,
3095 [STATUS
] = igb_get_status
,
3096 [ICS
] = igb_mac_ics_read
,
3098 * 8.8.10: Reading the IMC register returns the value of the IMS register.
3100 [IMC
] = igb_mac_ims_read
,
3101 [TORH
] = igb_mac_read_clr8
,
3102 [GORCH
] = igb_mac_read_clr8
,
3103 [PRC127
] = igb_mac_read_clr4
,
3104 [PRC511
] = igb_mac_read_clr4
,
3105 [PRC1522
] = igb_mac_read_clr4
,
3106 [PTC127
] = igb_mac_read_clr4
,
3107 [PTC511
] = igb_mac_read_clr4
,
3108 [PTC1522
] = igb_mac_read_clr4
,
3109 [GPTC
] = igb_mac_read_clr4
,
3110 [TPR
] = igb_mac_read_clr4
,
3111 [ROC
] = igb_mac_read_clr4
,
3112 [MPRC
] = igb_mac_read_clr4
,
3113 [BPTC
] = igb_mac_read_clr4
,
3114 [TSCTC
] = igb_mac_read_clr4
,
3115 [CTRL
] = igb_get_ctrl
,
3116 [SWSM
] = igb_mac_swsm_read
,
3117 [IMS
] = igb_mac_ims_read
,
3118 [SYSTIML
] = igb_get_systiml
,
3119 [RXSATRH
] = igb_get_rxsatrh
,
3120 [TXSTMPH
] = igb_get_txstmph
,
3122 [CRCERRS
... MPC
] = igb_mac_readreg
,
3123 [IP6AT
... IP6AT
+ 3] = igb_mac_readreg
,
3124 [IP4AT
... IP4AT
+ 6] = igb_mac_readreg
,
3125 [RA
... RA
+ 31] = igb_mac_readreg
,
3126 [RA2
... RA2
+ 31] = igb_mac_readreg
,
3127 [WUPM
... WUPM
+ 31] = igb_mac_readreg
,
3128 [MTA
... MTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_readreg
,
3129 [VFTA
... VFTA
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = igb_mac_readreg
,
3130 [FFMT
... FFMT
+ 254] = igb_mac_readreg
,
3131 [MDEF
... MDEF
+ 7] = igb_mac_readreg
,
3132 [FTFT
... FTFT
+ 254] = igb_mac_readreg
,
3133 [RETA
... RETA
+ 31] = igb_mac_readreg
,
3134 [RSSRK
... RSSRK
+ 9] = igb_mac_readreg
,
3135 [MAVTV0
... MAVTV3
] = igb_mac_readreg
,
3136 [EITR0
... EITR0
+ IGB_INTR_NUM
- 1] = igb_mac_eitr_read
,
3137 [PVTEICR0
] = igb_mac_read_clr4
,
3138 [PVTEICR1
] = igb_mac_read_clr4
,
3139 [PVTEICR2
] = igb_mac_read_clr4
,
3140 [PVTEICR3
] = igb_mac_read_clr4
,
3141 [PVTEICR4
] = igb_mac_read_clr4
,
3142 [PVTEICR5
] = igb_mac_read_clr4
,
3143 [PVTEICR6
] = igb_mac_read_clr4
,
3144 [PVTEICR7
] = igb_mac_read_clr4
,
3147 [FWSM
] = igb_mac_readreg
,
3148 [SW_FW_SYNC
] = igb_mac_readreg
,
3149 [HTCBDPC
] = igb_mac_read_clr4
,
3150 [EICR
] = igb_mac_read_clr4
,
3151 [EIMS
] = igb_mac_readreg
,
3152 [EIAM
] = igb_mac_readreg
,
3153 [IVAR0
... IVAR0
+ 7] = igb_mac_readreg
,
3154 igb_getreg(IVAR_MISC
),
3156 [P2VMAILBOX0
... P2VMAILBOX7
] = igb_mac_readreg
,
3157 [V2PMAILBOX0
... V2PMAILBOX7
] = igb_mac_vfmailbox_read
,
3158 igb_getreg(MBVFICR
),
3159 [VMBMEM0
... VMBMEM0
+ 127] = igb_mac_readreg
,
3160 igb_getreg(MBVFIMR
),
3167 [VLVF0
... VLVF0
+ E1000_VLVF_ARRAY_SIZE
- 1] = igb_mac_readreg
,
3168 [VMVIR0
... VMVIR7
] = igb_mac_readreg
,
3169 [VMOLR0
... VMOLR7
] = igb_mac_readreg
,
3170 [WVBR
] = igb_mac_read_clr4
,
3171 [RQDPC0
] = igb_mac_read_clr4
,
3172 [RQDPC1
] = igb_mac_read_clr4
,
3173 [RQDPC2
] = igb_mac_read_clr4
,
3174 [RQDPC3
] = igb_mac_read_clr4
,
3175 [RQDPC4
] = igb_mac_read_clr4
,
3176 [RQDPC5
] = igb_mac_read_clr4
,
3177 [RQDPC6
] = igb_mac_read_clr4
,
3178 [RQDPC7
] = igb_mac_read_clr4
,
3179 [RQDPC8
] = igb_mac_read_clr4
,
3180 [RQDPC9
] = igb_mac_read_clr4
,
3181 [RQDPC10
] = igb_mac_read_clr4
,
3182 [RQDPC11
] = igb_mac_read_clr4
,
3183 [RQDPC12
] = igb_mac_read_clr4
,
3184 [RQDPC13
] = igb_mac_read_clr4
,
3185 [RQDPC14
] = igb_mac_read_clr4
,
3186 [RQDPC15
] = igb_mac_read_clr4
,
3187 [VTIVAR
... VTIVAR
+ 7] = igb_mac_readreg
,
3188 [VTIVAR_MISC
... VTIVAR_MISC
+ 7] = igb_mac_readreg
,
/* Number of entries in the MAC register read dispatch table above. */
3190 enum { IGB_NREADOPS
= ARRAY_SIZE(igb_macreg_readops
) };
3192 #define igb_putreg(x) [x] = igb_mac_writereg
3193 typedef void (*writeops
)(IGBCore
*, int, uint32_t);
3194 static const writeops igb_macreg_writeops
[] = {
3207 igb_putreg(RDBAH10
),
3208 igb_putreg(RDBAH11
),
3209 igb_putreg(RDBAH12
),
3210 igb_putreg(RDBAH13
),
3211 igb_putreg(RDBAH14
),
3212 igb_putreg(RDBAH15
),
3213 igb_putreg(SRRCTL0
),
3214 igb_putreg(SRRCTL1
),
3215 igb_putreg(SRRCTL2
),
3216 igb_putreg(SRRCTL3
),
3217 igb_putreg(SRRCTL4
),
3218 igb_putreg(SRRCTL5
),
3219 igb_putreg(SRRCTL6
),
3220 igb_putreg(SRRCTL7
),
3221 igb_putreg(SRRCTL8
),
3222 igb_putreg(SRRCTL9
),
3223 igb_putreg(SRRCTL10
),
3224 igb_putreg(SRRCTL11
),
3225 igb_putreg(SRRCTL12
),
3226 igb_putreg(SRRCTL13
),
3227 igb_putreg(SRRCTL14
),
3228 igb_putreg(SRRCTL15
),
3229 igb_putreg(RXDCTL0
),
3230 igb_putreg(RXDCTL1
),
3231 igb_putreg(RXDCTL2
),
3232 igb_putreg(RXDCTL3
),
3233 igb_putreg(RXDCTL4
),
3234 igb_putreg(RXDCTL5
),
3235 igb_putreg(RXDCTL6
),
3236 igb_putreg(RXDCTL7
),
3237 igb_putreg(RXDCTL8
),
3238 igb_putreg(RXDCTL9
),
3239 igb_putreg(RXDCTL10
),
3240 igb_putreg(RXDCTL11
),
3241 igb_putreg(RXDCTL12
),
3242 igb_putreg(RXDCTL13
),
3243 igb_putreg(RXDCTL14
),
3244 igb_putreg(RXDCTL15
),
3247 igb_putreg(TCTL_EXT
),
3266 igb_putreg(TDBAH10
),
3267 igb_putreg(TDBAH11
),
3268 igb_putreg(TDBAH12
),
3269 igb_putreg(TDBAH13
),
3270 igb_putreg(TDBAH14
),
3271 igb_putreg(TDBAH15
),
3277 igb_putreg(FUNCTAG
),
3289 igb_putreg(TXDCTL0
),
3290 igb_putreg(TXDCTL1
),
3291 igb_putreg(TXDCTL2
),
3292 igb_putreg(TXDCTL3
),
3293 igb_putreg(TXDCTL4
),
3294 igb_putreg(TXDCTL5
),
3295 igb_putreg(TXDCTL6
),
3296 igb_putreg(TXDCTL7
),
3297 igb_putreg(TXDCTL8
),
3298 igb_putreg(TXDCTL9
),
3299 igb_putreg(TXDCTL10
),
3300 igb_putreg(TXDCTL11
),
3301 igb_putreg(TXDCTL12
),
3302 igb_putreg(TXDCTL13
),
3303 igb_putreg(TXDCTL14
),
3304 igb_putreg(TXDCTL15
),
3315 igb_putreg(TXCTL10
),
3316 igb_putreg(TXCTL11
),
3317 igb_putreg(TXCTL12
),
3318 igb_putreg(TXCTL13
),
3319 igb_putreg(TXCTL14
),
3320 igb_putreg(TXCTL15
),
3321 igb_putreg(TDWBAL0
),
3322 igb_putreg(TDWBAL1
),
3323 igb_putreg(TDWBAL2
),
3324 igb_putreg(TDWBAL3
),
3325 igb_putreg(TDWBAL4
),
3326 igb_putreg(TDWBAL5
),
3327 igb_putreg(TDWBAL6
),
3328 igb_putreg(TDWBAL7
),
3329 igb_putreg(TDWBAL8
),
3330 igb_putreg(TDWBAL9
),
3331 igb_putreg(TDWBAL10
),
3332 igb_putreg(TDWBAL11
),
3333 igb_putreg(TDWBAL12
),
3334 igb_putreg(TDWBAL13
),
3335 igb_putreg(TDWBAL14
),
3336 igb_putreg(TDWBAL15
),
3337 igb_putreg(TDWBAH0
),
3338 igb_putreg(TDWBAH1
),
3339 igb_putreg(TDWBAH2
),
3340 igb_putreg(TDWBAH3
),
3341 igb_putreg(TDWBAH4
),
3342 igb_putreg(TDWBAH5
),
3343 igb_putreg(TDWBAH6
),
3344 igb_putreg(TDWBAH7
),
3345 igb_putreg(TDWBAH8
),
3346 igb_putreg(TDWBAH9
),
3347 igb_putreg(TDWBAH10
),
3348 igb_putreg(TDWBAH11
),
3349 igb_putreg(TDWBAH12
),
3350 igb_putreg(TDWBAH13
),
3351 igb_putreg(TDWBAH14
),
3352 igb_putreg(TDWBAH15
),
3354 igb_putreg(RXSTMPH
),
3355 igb_putreg(RXSTMPL
),
3356 igb_putreg(RXSATRL
),
3357 igb_putreg(RXSATRH
),
3358 igb_putreg(TXSTMPL
),
3359 igb_putreg(TXSTMPH
),
3360 igb_putreg(SYSTIML
),
3361 igb_putreg(SYSTIMH
),
3362 igb_putreg(TIMADJL
),
3363 igb_putreg(TSYNCRXCTL
),
3364 igb_putreg(TSYNCTXCTL
),
3365 igb_putreg(EEMNGCTL
),
3371 [TDH0
] = igb_set_16bit
,
3372 [TDH1
] = igb_set_16bit
,
3373 [TDH2
] = igb_set_16bit
,
3374 [TDH3
] = igb_set_16bit
,
3375 [TDH4
] = igb_set_16bit
,
3376 [TDH5
] = igb_set_16bit
,
3377 [TDH6
] = igb_set_16bit
,
3378 [TDH7
] = igb_set_16bit
,
3379 [TDH8
] = igb_set_16bit
,
3380 [TDH9
] = igb_set_16bit
,
3381 [TDH10
] = igb_set_16bit
,
3382 [TDH11
] = igb_set_16bit
,
3383 [TDH12
] = igb_set_16bit
,
3384 [TDH13
] = igb_set_16bit
,
3385 [TDH14
] = igb_set_16bit
,
3386 [TDH15
] = igb_set_16bit
,
3387 [TDT0
] = igb_set_tdt
,
3388 [TDT1
] = igb_set_tdt
,
3389 [TDT2
] = igb_set_tdt
,
3390 [TDT3
] = igb_set_tdt
,
3391 [TDT4
] = igb_set_tdt
,
3392 [TDT5
] = igb_set_tdt
,
3393 [TDT6
] = igb_set_tdt
,
3394 [TDT7
] = igb_set_tdt
,
3395 [TDT8
] = igb_set_tdt
,
3396 [TDT9
] = igb_set_tdt
,
3397 [TDT10
] = igb_set_tdt
,
3398 [TDT11
] = igb_set_tdt
,
3399 [TDT12
] = igb_set_tdt
,
3400 [TDT13
] = igb_set_tdt
,
3401 [TDT14
] = igb_set_tdt
,
3402 [TDT15
] = igb_set_tdt
,
3403 [MDIC
] = igb_set_mdic
,
3404 [ICS
] = igb_set_ics
,
3405 [RDH0
] = igb_set_16bit
,
3406 [RDH1
] = igb_set_16bit
,
3407 [RDH2
] = igb_set_16bit
,
3408 [RDH3
] = igb_set_16bit
,
3409 [RDH4
] = igb_set_16bit
,
3410 [RDH5
] = igb_set_16bit
,
3411 [RDH6
] = igb_set_16bit
,
3412 [RDH7
] = igb_set_16bit
,
3413 [RDH8
] = igb_set_16bit
,
3414 [RDH9
] = igb_set_16bit
,
3415 [RDH10
] = igb_set_16bit
,
3416 [RDH11
] = igb_set_16bit
,
3417 [RDH12
] = igb_set_16bit
,
3418 [RDH13
] = igb_set_16bit
,
3419 [RDH14
] = igb_set_16bit
,
3420 [RDH15
] = igb_set_16bit
,
3421 [RDT0
] = igb_set_rdt
,
3422 [RDT1
] = igb_set_rdt
,
3423 [RDT2
] = igb_set_rdt
,
3424 [RDT3
] = igb_set_rdt
,
3425 [RDT4
] = igb_set_rdt
,
3426 [RDT5
] = igb_set_rdt
,
3427 [RDT6
] = igb_set_rdt
,
3428 [RDT7
] = igb_set_rdt
,
3429 [RDT8
] = igb_set_rdt
,
3430 [RDT9
] = igb_set_rdt
,
3431 [RDT10
] = igb_set_rdt
,
3432 [RDT11
] = igb_set_rdt
,
3433 [RDT12
] = igb_set_rdt
,
3434 [RDT13
] = igb_set_rdt
,
3435 [RDT14
] = igb_set_rdt
,
3436 [RDT15
] = igb_set_rdt
,
3437 [IMC
] = igb_set_imc
,
3438 [IMS
] = igb_set_ims
,
3439 [ICR
] = igb_set_icr
,
3440 [EECD
] = igb_set_eecd
,
3441 [RCTL
] = igb_set_rx_control
,
3442 [CTRL
] = igb_set_ctrl
,
3443 [EERD
] = igb_set_eerd
,
3444 [TDFH
] = igb_set_13bit
,
3445 [TDFT
] = igb_set_13bit
,
3446 [TDFHS
] = igb_set_13bit
,
3447 [TDFTS
] = igb_set_13bit
,
3448 [TDFPC
] = igb_set_13bit
,
3449 [RDFH
] = igb_set_13bit
,
3450 [RDFT
] = igb_set_13bit
,
3451 [RDFHS
] = igb_set_13bit
,
3452 [RDFTS
] = igb_set_13bit
,
3453 [RDFPC
] = igb_set_13bit
,
3454 [GCR
] = igb_set_gcr
,
3455 [RXCSUM
] = igb_set_rxcsum
,
3456 [TDLEN0
] = igb_set_dlen
,
3457 [TDLEN1
] = igb_set_dlen
,
3458 [TDLEN2
] = igb_set_dlen
,
3459 [TDLEN3
] = igb_set_dlen
,
3460 [TDLEN4
] = igb_set_dlen
,
3461 [TDLEN5
] = igb_set_dlen
,
3462 [TDLEN6
] = igb_set_dlen
,
3463 [TDLEN7
] = igb_set_dlen
,
3464 [TDLEN8
] = igb_set_dlen
,
3465 [TDLEN9
] = igb_set_dlen
,
3466 [TDLEN10
] = igb_set_dlen
,
3467 [TDLEN11
] = igb_set_dlen
,
3468 [TDLEN12
] = igb_set_dlen
,
3469 [TDLEN13
] = igb_set_dlen
,
3470 [TDLEN14
] = igb_set_dlen
,
3471 [TDLEN15
] = igb_set_dlen
,
3472 [RDLEN0
] = igb_set_dlen
,
3473 [RDLEN1
] = igb_set_dlen
,
3474 [RDLEN2
] = igb_set_dlen
,
3475 [RDLEN3
] = igb_set_dlen
,
3476 [RDLEN4
] = igb_set_dlen
,
3477 [RDLEN5
] = igb_set_dlen
,
3478 [RDLEN6
] = igb_set_dlen
,
3479 [RDLEN7
] = igb_set_dlen
,
3480 [RDLEN8
] = igb_set_dlen
,
3481 [RDLEN9
] = igb_set_dlen
,
3482 [RDLEN10
] = igb_set_dlen
,
3483 [RDLEN11
] = igb_set_dlen
,
3484 [RDLEN12
] = igb_set_dlen
,
3485 [RDLEN13
] = igb_set_dlen
,
3486 [RDLEN14
] = igb_set_dlen
,
3487 [RDLEN15
] = igb_set_dlen
,
3488 [TDBAL0
] = igb_set_dbal
,
3489 [TDBAL1
] = igb_set_dbal
,
3490 [TDBAL2
] = igb_set_dbal
,
3491 [TDBAL3
] = igb_set_dbal
,
3492 [TDBAL4
] = igb_set_dbal
,
3493 [TDBAL5
] = igb_set_dbal
,
3494 [TDBAL6
] = igb_set_dbal
,
3495 [TDBAL7
] = igb_set_dbal
,
3496 [TDBAL8
] = igb_set_dbal
,
3497 [TDBAL9
] = igb_set_dbal
,
3498 [TDBAL10
] = igb_set_dbal
,
3499 [TDBAL11
] = igb_set_dbal
,
3500 [TDBAL12
] = igb_set_dbal
,
3501 [TDBAL13
] = igb_set_dbal
,
3502 [TDBAL14
] = igb_set_dbal
,
3503 [TDBAL15
] = igb_set_dbal
,
3504 [RDBAL0
] = igb_set_dbal
,
3505 [RDBAL1
] = igb_set_dbal
,
3506 [RDBAL2
] = igb_set_dbal
,
3507 [RDBAL3
] = igb_set_dbal
,
3508 [RDBAL4
] = igb_set_dbal
,
3509 [RDBAL5
] = igb_set_dbal
,
3510 [RDBAL6
] = igb_set_dbal
,
3511 [RDBAL7
] = igb_set_dbal
,
3512 [RDBAL8
] = igb_set_dbal
,
3513 [RDBAL9
] = igb_set_dbal
,
3514 [RDBAL10
] = igb_set_dbal
,
3515 [RDBAL11
] = igb_set_dbal
,
3516 [RDBAL12
] = igb_set_dbal
,
3517 [RDBAL13
] = igb_set_dbal
,
3518 [RDBAL14
] = igb_set_dbal
,
3519 [RDBAL15
] = igb_set_dbal
,
3520 [STATUS
] = igb_set_status
,
3521 [PBACLR
] = igb_set_pbaclr
,
3522 [CTRL_EXT
] = igb_set_ctrlext
,
3523 [FCAH
] = igb_set_16bit
,
3524 [FCT
] = igb_set_16bit
,
3525 [FCTTV
] = igb_set_16bit
,
3526 [FCRTV
] = igb_set_16bit
,
3527 [FCRTH
] = igb_set_fcrth
,
3528 [FCRTL
] = igb_set_fcrtl
,
3529 [CTRL_DUP
] = igb_set_ctrl
,
3530 [RFCTL
] = igb_set_rfctl
,
3531 [TIMINCA
] = igb_set_timinca
,
3532 [TIMADJH
] = igb_set_timadjh
,
3534 [IP6AT
... IP6AT
+ 3] = igb_mac_writereg
,
3535 [IP4AT
... IP4AT
+ 6] = igb_mac_writereg
,
3536 [RA
] = igb_mac_writereg
,
3537 [RA
+ 1] = igb_mac_setmacaddr
,
3538 [RA
+ 2 ... RA
+ 31] = igb_mac_writereg
,
3539 [RA2
... RA2
+ 31] = igb_mac_writereg
,
3540 [WUPM
... WUPM
+ 31] = igb_mac_writereg
,
3541 [MTA
... MTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_writereg
,
3542 [VFTA
... VFTA
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = igb_mac_writereg
,
3543 [FFMT
... FFMT
+ 254] = igb_set_4bit
,
3544 [MDEF
... MDEF
+ 7] = igb_mac_writereg
,
3545 [FTFT
... FTFT
+ 254] = igb_mac_writereg
,
3546 [RETA
... RETA
+ 31] = igb_mac_writereg
,
3547 [RSSRK
... RSSRK
+ 9] = igb_mac_writereg
,
3548 [MAVTV0
... MAVTV3
] = igb_mac_writereg
,
3549 [EITR0
... EITR0
+ IGB_INTR_NUM
- 1] = igb_set_eitr
,
3552 [FWSM
] = igb_mac_writereg
,
3553 [SW_FW_SYNC
] = igb_mac_writereg
,
3554 [EICR
] = igb_set_eicr
,
3555 [EICS
] = igb_set_eics
,
3556 [EIAC
] = igb_set_eiac
,
3557 [EIAM
] = igb_set_eiam
,
3558 [EIMC
] = igb_set_eimc
,
3559 [EIMS
] = igb_set_eims
,
3560 [IVAR0
... IVAR0
+ 7] = igb_mac_writereg
,
3561 igb_putreg(IVAR_MISC
),
3563 [P2VMAILBOX0
... P2VMAILBOX7
] = igb_set_pfmailbox
,
3564 [V2PMAILBOX0
... V2PMAILBOX7
] = igb_set_vfmailbox
,
3565 [MBVFICR
] = igb_w1c
,
3566 [VMBMEM0
... VMBMEM0
+ 127] = igb_mac_writereg
,
3567 igb_putreg(MBVFIMR
),
3574 [VLVF0
... VLVF0
+ E1000_VLVF_ARRAY_SIZE
- 1] = igb_mac_writereg
,
3575 [VMVIR0
... VMVIR7
] = igb_mac_writereg
,
3576 [VMOLR0
... VMOLR7
] = igb_mac_writereg
,
3577 [UTA
... UTA
+ E1000_MC_TBL_SIZE
- 1] = igb_mac_writereg
,
3578 [PVTCTRL0
] = igb_set_vtctrl
,
3579 [PVTCTRL1
] = igb_set_vtctrl
,
3580 [PVTCTRL2
] = igb_set_vtctrl
,
3581 [PVTCTRL3
] = igb_set_vtctrl
,
3582 [PVTCTRL4
] = igb_set_vtctrl
,
3583 [PVTCTRL5
] = igb_set_vtctrl
,
3584 [PVTCTRL6
] = igb_set_vtctrl
,
3585 [PVTCTRL7
] = igb_set_vtctrl
,
3586 [PVTEICS0
] = igb_set_vteics
,
3587 [PVTEICS1
] = igb_set_vteics
,
3588 [PVTEICS2
] = igb_set_vteics
,
3589 [PVTEICS3
] = igb_set_vteics
,
3590 [PVTEICS4
] = igb_set_vteics
,
3591 [PVTEICS5
] = igb_set_vteics
,
3592 [PVTEICS6
] = igb_set_vteics
,
3593 [PVTEICS7
] = igb_set_vteics
,
3594 [PVTEIMS0
] = igb_set_vteims
,
3595 [PVTEIMS1
] = igb_set_vteims
,
3596 [PVTEIMS2
] = igb_set_vteims
,
3597 [PVTEIMS3
] = igb_set_vteims
,
3598 [PVTEIMS4
] = igb_set_vteims
,
3599 [PVTEIMS5
] = igb_set_vteims
,
3600 [PVTEIMS6
] = igb_set_vteims
,
3601 [PVTEIMS7
] = igb_set_vteims
,
3602 [PVTEIMC0
] = igb_set_vteimc
,
3603 [PVTEIMC1
] = igb_set_vteimc
,
3604 [PVTEIMC2
] = igb_set_vteimc
,
3605 [PVTEIMC3
] = igb_set_vteimc
,
3606 [PVTEIMC4
] = igb_set_vteimc
,
3607 [PVTEIMC5
] = igb_set_vteimc
,
3608 [PVTEIMC6
] = igb_set_vteimc
,
3609 [PVTEIMC7
] = igb_set_vteimc
,
3610 [PVTEIAC0
] = igb_set_vteiac
,
3611 [PVTEIAC1
] = igb_set_vteiac
,
3612 [PVTEIAC2
] = igb_set_vteiac
,
3613 [PVTEIAC3
] = igb_set_vteiac
,
3614 [PVTEIAC4
] = igb_set_vteiac
,
3615 [PVTEIAC5
] = igb_set_vteiac
,
3616 [PVTEIAC6
] = igb_set_vteiac
,
3617 [PVTEIAC7
] = igb_set_vteiac
,
3618 [PVTEIAM0
] = igb_set_vteiam
,
3619 [PVTEIAM1
] = igb_set_vteiam
,
3620 [PVTEIAM2
] = igb_set_vteiam
,
3621 [PVTEIAM3
] = igb_set_vteiam
,
3622 [PVTEIAM4
] = igb_set_vteiam
,
3623 [PVTEIAM5
] = igb_set_vteiam
,
3624 [PVTEIAM6
] = igb_set_vteiam
,
3625 [PVTEIAM7
] = igb_set_vteiam
,
3626 [PVTEICR0
] = igb_set_vteicr
,
3627 [PVTEICR1
] = igb_set_vteicr
,
3628 [PVTEICR2
] = igb_set_vteicr
,
3629 [PVTEICR3
] = igb_set_vteicr
,
3630 [PVTEICR4
] = igb_set_vteicr
,
3631 [PVTEICR5
] = igb_set_vteicr
,
3632 [PVTEICR6
] = igb_set_vteicr
,
3633 [PVTEICR7
] = igb_set_vteicr
,
3634 [VTIVAR
... VTIVAR
+ 7] = igb_set_vtivar
,
3635 [VTIVAR_MISC
... VTIVAR_MISC
+ 7] = igb_mac_writereg
/* Number of entries in the MAC register write dispatch table above. */
3637 enum { IGB_NWRITEOPS
= ARRAY_SIZE(igb_macreg_writeops
) };
/*
 * Low bit stored in mac_reg_access[]: marks a register whose emulation is
 * only partial/trivial, so reads and writes of it are reported through the
 * "trivial" trace points in igb_core_read()/igb_core_write().
 */
3639 enum { MAC_ACCESS_PARTIAL
= 1 };
3642 * The array below combines alias offsets of the index values for the
3643 * MAC registers that have aliases, with the indication of not fully
3644 * implemented registers (lowest bit). This combination is possible
3645 * because all of the offsets are even.
3647 static const uint16_t mac_reg_access
[E1000E_MAC_SIZE
] = {
3648 /* Alias index offsets */
3650 [RDFH_A
] = 0xe904, [RDFT_A
] = 0xe904,
3651 [TDFH_A
] = 0xed00, [TDFT_A
] = 0xed00,
3652 [RA_A
... RA_A
+ 31] = 0x14f0,
3653 [VFTA_A
... VFTA_A
+ E1000_VLAN_FILTER_TBL_SIZE
- 1] = 0x1400,
3655 [RDBAL0_A
] = 0x2600,
3656 [RDBAH0_A
] = 0x2600,
3657 [RDLEN0_A
] = 0x2600,
3658 [SRRCTL0_A
] = 0x2600,
3661 [RXDCTL0_A
] = 0x2600,
3662 [RXCTL0_A
] = 0x2600,
3663 [RQDPC0_A
] = 0x2600,
3664 [RDBAL1_A
] = 0x25D0,
3665 [RDBAL2_A
] = 0x25A0,
3666 [RDBAL3_A
] = 0x2570,
3667 [RDBAH1_A
] = 0x25D0,
3668 [RDBAH2_A
] = 0x25A0,
3669 [RDBAH3_A
] = 0x2570,
3670 [RDLEN1_A
] = 0x25D0,
3671 [RDLEN2_A
] = 0x25A0,
3672 [RDLEN3_A
] = 0x2570,
3673 [SRRCTL1_A
] = 0x25D0,
3674 [SRRCTL2_A
] = 0x25A0,
3675 [SRRCTL3_A
] = 0x2570,
3682 [RXDCTL1_A
] = 0x25D0,
3683 [RXDCTL2_A
] = 0x25A0,
3684 [RXDCTL3_A
] = 0x2570,
3685 [RXCTL1_A
] = 0x25D0,
3686 [RXCTL2_A
] = 0x25A0,
3687 [RXCTL3_A
] = 0x2570,
3688 [RQDPC1_A
] = 0x25D0,
3689 [RQDPC2_A
] = 0x25A0,
3690 [RQDPC3_A
] = 0x2570,
3691 [TDBAL0_A
] = 0x2A00,
3692 [TDBAH0_A
] = 0x2A00,
3693 [TDLEN0_A
] = 0x2A00,
3696 [TXCTL0_A
] = 0x2A00,
3697 [TDWBAL0_A
] = 0x2A00,
3698 [TDWBAH0_A
] = 0x2A00,
3699 [TDBAL1_A
] = 0x29D0,
3700 [TDBAL2_A
] = 0x29A0,
3701 [TDBAL3_A
] = 0x2970,
3702 [TDBAH1_A
] = 0x29D0,
3703 [TDBAH2_A
] = 0x29A0,
3704 [TDBAH3_A
] = 0x2970,
3705 [TDLEN1_A
] = 0x29D0,
3706 [TDLEN2_A
] = 0x29A0,
3707 [TDLEN3_A
] = 0x2970,
3714 [TXDCTL0_A
] = 0x2A00,
3715 [TXDCTL1_A
] = 0x29D0,
3716 [TXDCTL2_A
] = 0x29A0,
3717 [TXDCTL3_A
] = 0x2970,
3718 [TXCTL1_A
] = 0x29D0,
3719 [TXCTL2_A
] = 0x29A0,
3720 [TXCTL3_A
] = 0x29D0,
3721 [TDWBAL1_A
] = 0x29D0,
3722 [TDWBAL2_A
] = 0x29A0,
3723 [TDWBAL3_A
] = 0x2970,
3724 [TDWBAH1_A
] = 0x29D0,
3725 [TDWBAH2_A
] = 0x29A0,
3726 [TDWBAH3_A
] = 0x2970,
3728 /* Access options */
3729 [RDFH
] = MAC_ACCESS_PARTIAL
, [RDFT
] = MAC_ACCESS_PARTIAL
,
3730 [RDFHS
] = MAC_ACCESS_PARTIAL
, [RDFTS
] = MAC_ACCESS_PARTIAL
,
3731 [RDFPC
] = MAC_ACCESS_PARTIAL
,
3732 [TDFH
] = MAC_ACCESS_PARTIAL
, [TDFT
] = MAC_ACCESS_PARTIAL
,
3733 [TDFHS
] = MAC_ACCESS_PARTIAL
, [TDFTS
] = MAC_ACCESS_PARTIAL
,
3734 [TDFPC
] = MAC_ACCESS_PARTIAL
, [EECD
] = MAC_ACCESS_PARTIAL
,
3735 [FLA
] = MAC_ACCESS_PARTIAL
,
3736 [FCAL
] = MAC_ACCESS_PARTIAL
, [FCAH
] = MAC_ACCESS_PARTIAL
,
3737 [FCT
] = MAC_ACCESS_PARTIAL
, [FCTTV
] = MAC_ACCESS_PARTIAL
,
3738 [FCRTV
] = MAC_ACCESS_PARTIAL
, [FCRTL
] = MAC_ACCESS_PARTIAL
,
3739 [FCRTH
] = MAC_ACCESS_PARTIAL
,
3740 [MAVTV0
... MAVTV3
] = MAC_ACCESS_PARTIAL
/*
 * igb_core_write() - MMIO write into the MAC register space.
 * @core: device core state
 * @addr: byte offset of the access within the register BAR
 * @val:  value being written
 * @size: access size in bytes (only used for tracing here)
 *
 * The byte offset is translated to a 32-bit register index (alias offsets
 * from mac_reg_access[] are folded in, hence "index << 2" recovers the
 * canonical byte offset for tracing).  If a write handler exists it is
 * invoked; writes to registers that only have a read handler (read-only)
 * or to unknown registers are traced and otherwise dropped.
 */
3744 igb_core_write(IGBCore
*core
, hwaddr addr
, uint64_t val
, unsigned size
)
3746 uint16_t index
= igb_get_reg_index_with_offset(mac_reg_access
, addr
);
3748 if (index
< IGB_NWRITEOPS
&& igb_macreg_writeops
[index
]) {
/* Partially implemented register: note it in the trace log first. */
3749 if (mac_reg_access
[index
] & MAC_ACCESS_PARTIAL
) {
3750 trace_e1000e_wrn_regs_write_trivial(index
<< 2);
3752 trace_e1000e_core_write(index
<< 2, size
, val
);
3753 igb_macreg_writeops
[index
](core
, index
, val
);
/* Readable but no write handler registered: treat as read-only. */
3754 } else if (index
< IGB_NREADOPS
&& igb_macreg_readops
[index
]) {
3755 trace_e1000e_wrn_regs_write_ro(index
<< 2, size
, val
);
/* No handler at all: unknown register, write is discarded. */
3757 trace_e1000e_wrn_regs_write_unknown(index
<< 2, size
, val
);
/*
 * igb_core_read() - MMIO read from the MAC register space.
 * @core: device core state
 * @addr: byte offset of the access within the register BAR
 * @size: access size in bytes (only used for tracing here)
 *
 * Mirrors igb_core_write(): the offset is mapped to a register index with
 * aliases resolved, and the per-register read handler produces the value.
 * Reads of unknown registers are traced; the fall-back return value is
 * established outside the lines visible here.
 */
3762 igb_core_read(IGBCore
*core
, hwaddr addr
, unsigned size
)
3765 uint16_t index
= igb_get_reg_index_with_offset(mac_reg_access
, addr
);
3767 if (index
< IGB_NREADOPS
&& igb_macreg_readops
[index
]) {
/* Partially implemented register: note it in the trace log first. */
3768 if (mac_reg_access
[index
] & MAC_ACCESS_PARTIAL
) {
3769 trace_e1000e_wrn_regs_read_trivial(index
<< 2);
3771 val
= igb_macreg_readops
[index
](core
, index
);
3772 trace_e1000e_core_read(index
<< 2, size
, val
);
/* No read handler registered: unknown register. */
3775 trace_e1000e_wrn_regs_read_unknown(index
<< 2, size
);
/*
 * Cancel the pending software auto-negotiation timer.  Called when the
 * VM stops (see igb_vm_state_change) so autoneg does not complete while
 * the guest is not running.
 */
3781 igb_autoneg_pause(IGBCore
*core
)
3783 timer_del(core
->autoneg_timer
);
/*
 * Re-arm auto-negotiation after the VM resumes: only if autoneg is
 * enabled and has not yet completed (MII_BMSR_AN_COMP still clear).
 * The net-layer link is marked up and the autoneg timer is scheduled to
 * fire 500 ms from now to finish the emulated negotiation.
 */
3787 igb_autoneg_resume(IGBCore
*core
)
3789 if (igb_have_autoneg(core
) &&
3790 !(core
->phy
[MII_BMSR
] & MII_BMSR_AN_COMP
)) {
3791 qemu_get_queue(core
->owner_nic
)->link_down
= false;
3792 timer_mod(core
->autoneg_timer
,
3793 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
) + 500);
/*
 * VM run-state change callback (registered in igb_core_pci_realize).
 * When the VM becomes running, interrupt-manager timers and pending
 * auto-negotiation are resumed; when it stops, they are paused so no
 * timer-driven device activity happens while the guest is halted.
 * NOTE(review): the branching on @running sits outside the visible
 * lines; the specific RunState value does not appear to be inspected.
 */
3798 igb_vm_state_change(void *opaque
, bool running
, RunState state
)
3800 IGBCore
*core
= opaque
;
/* Running: restore timer-driven activity. */
3803 trace_e1000e_vm_state_running();
3804 igb_intrmgr_resume(core
);
3805 igb_autoneg_resume(core
);
/* Stopped: quiesce timers until the VM runs again. */
3807 trace_e1000e_vm_state_stopped();
3808 igb_autoneg_pause(core
);
3809 igb_intrmgr_pause(core
);
/*
 * igb_core_pci_realize() - one-time device-realize initialization.
 * @eeprom_templ: template contents for the emulated EEPROM
 * @eeprom_size:  size of the template
 * @macaddr:      MAC address to program into the EEPROM/registers
 *
 * Creates the auto-negotiation timer and interrupt-manager state,
 * registers the VM run-state callback, allocates per-queue TX packet
 * state plus the shared RX packet object, prepares the EEPROM image and
 * applies the initial RX offload configuration.
 */
3814 igb_core_pci_realize(IGBCore
*core
,
3815 const uint16_t *eeprom_templ
,
3816 uint32_t eeprom_size
,
3817 const uint8_t *macaddr
)
3821 core
->autoneg_timer
= timer_new_ms(QEMU_CLOCK_VIRTUAL
,
3822 igb_autoneg_timer
, core
);
3823 igb_intrmgr_pci_realize(core
);
3825 core
->vmstate
= qemu_add_vm_change_state_handler(igb_vm_state_change
, core
);
/* One TX packet assembly context per hardware queue. */
3827 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
3828 net_tx_pkt_init(&core
->tx
[i
].tx_pkt
, core
->owner
, E1000E_MAX_TX_FRAGS
);
3831 net_rx_pkt_init(&core
->rx_pkt
);
/* Populate the emulated EEPROM (device id taken from the PCI class). */
3833 e1000x_core_prepare_eeprom(core
->eeprom
,
3836 PCI_DEVICE_GET_CLASS(core
->owner
)->device_id
,
3838 igb_update_rx_offloads(core
);
/*
 * igb_core_pci_uninit() - tear-down counterpart of igb_core_pci_realize.
 * Releases the autoneg timer, interrupt-manager state and the VM
 * run-state handler, then frees every per-queue TX packet context and
 * the shared RX packet object.
 */
3842 igb_core_pci_uninit(IGBCore
*core
)
3846 timer_free(core
->autoneg_timer
);
3848 igb_intrmgr_pci_unint(core
);
3850 qemu_del_vm_change_state_handler(core
->vmstate
);
/* Drop any partially assembled TX packet before freeing its context. */
3852 for (i
= 0; i
< IGB_NUM_QUEUES
; i
++) {
3853 net_tx_pkt_reset(core
->tx
[i
].tx_pkt
);
3854 net_tx_pkt_uninit(core
->tx
[i
].tx_pkt
);
3857 net_rx_pkt_uninit(core
->rx_pkt
);
/*
 * PHY (MII management) register reset values, copied wholesale into
 * core->phy[] by igb_reset().  The identifier registers advertise an
 * IGP03E1000 PHY; the auto-negotiation registers advertise 10/100/1000
 * operation in both duplex modes plus pause capabilities.
 */
3860 static const uint16_t
3861 igb_phy_reg_init
[] = {
3862 [MII_BMCR
] = MII_BMCR_SPEED1000
|
3866 [MII_BMSR
] = MII_BMSR_EXTCAP
|
/* PHY identifier: IGP03E1000 (revision in the low bits of PHYID2). */
3876 [MII_PHYID1
] = IGP03E1000_E_PHY_ID
>> 16,
3877 [MII_PHYID2
] = (IGP03E1000_E_PHY_ID
& 0xfff0) | 1,
/* Local autoneg advertisement: 10/100 both duplexes + pause. */
3878 [MII_ANAR
] = MII_ANAR_CSMACD
| MII_ANAR_10
|
3879 MII_ANAR_10FD
| MII_ANAR_TX
|
3880 MII_ANAR_TXFD
| MII_ANAR_PAUSE
|
3881 MII_ANAR_PAUSE_ASYM
,
/* Link partner advertisement mirrors the local capabilities. */
3882 [MII_ANLPAR
] = MII_ANLPAR_10
| MII_ANLPAR_10FD
|
3883 MII_ANLPAR_TX
| MII_ANLPAR_TXFD
|
3884 MII_ANLPAR_T4
| MII_ANLPAR_PAUSE
,
3885 [MII_ANER
] = MII_ANER_NP
| MII_ANER_NWAY
,
3886 [MII_ANNP
] = 0x1 | MII_ANNP_MP
,
/* 1000BASE-T control/status: both duplexes, local/remote receiver OK. */
3887 [MII_CTRL1000
] = MII_CTRL1000_HALF
| MII_CTRL1000_FULL
|
3888 MII_CTRL1000_PORT
| MII_CTRL1000_MASTER
,
3889 [MII_STAT1000
] = MII_STAT1000_HALF
| MII_STAT1000_FULL
|
3890 MII_STAT1000_ROK
| MII_STAT1000_LOK
,
3891 [MII_EXTSTAT
] = MII_EXTSTAT_1000T_HD
| MII_EXTSTAT_1000T_FD
,
/* Intel IGP vendor-specific registers: report a 1000 Mb/s link. */
3893 [IGP01E1000_PHY_PORT_CONFIG
] = BIT(5) | BIT(8),
3894 [IGP01E1000_PHY_PORT_STATUS
] = IGP01E1000_PSSR_SPEED_1000MBPS
,
3895 [IGP02E1000_PHY_POWER_MGMT
] = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU
|
3896 IGP01E1000_PSCFR_SMART_SPEED
3899 static const uint32_t igb_mac_reg_init
[] = {
3900 [LEDCTL
] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
3901 [EEMNGCTL
] = BIT(31),
3902 [RXDCTL0
] = E1000_RXDCTL_QUEUE_ENABLE
| (1 << 16),
3903 [RXDCTL1
] = 1 << 16,
3904 [RXDCTL2
] = 1 << 16,
3905 [RXDCTL3
] = 1 << 16,
3906 [RXDCTL4
] = 1 << 16,
3907 [RXDCTL5
] = 1 << 16,
3908 [RXDCTL6
] = 1 << 16,
3909 [RXDCTL7
] = 1 << 16,
3910 [RXDCTL8
] = 1 << 16,
3911 [RXDCTL9
] = 1 << 16,
3912 [RXDCTL10
] = 1 << 16,
3913 [RXDCTL11
] = 1 << 16,
3914 [RXDCTL12
] = 1 << 16,
3915 [RXDCTL13
] = 1 << 16,
3916 [RXDCTL14
] = 1 << 16,
3917 [RXDCTL15
] = 1 << 16,
3918 [TIPG
] = 0x08 | (0x04 << 10) | (0x06 << 20),
3919 [CTRL
] = E1000_CTRL_FD
| E1000_CTRL_LRST
| E1000_CTRL_SPD_1000
|
3920 E1000_CTRL_ADVD3WUC
,
3921 [STATUS
] = E1000_STATUS_PHYRA
| BIT(31),
3922 [EECD
] = E1000_EECD_FWE_DIS
| E1000_EECD_PRES
|
3923 (2 << E1000_EECD_SIZE_EX_SHIFT
),
3924 [GCR
] = E1000_L0S_ADJUST
|
3925 E1000_GCR_CMPL_TMOUT_RESEND
|
3926 E1000_GCR_CAP_VER2
|
3927 E1000_L1_ENTRY_LATENCY_MSB
|
3928 E1000_L1_ENTRY_LATENCY_LSB
,
3929 [RXCSUM
] = E1000_RXCSUM_IPOFLD
| E1000_RXCSUM_TUOFLD
,
3932 [TCTL
] = E1000_TCTL_PSP
| (0xF << E1000_CT_SHIFT
) |
3933 (0x40 << E1000_COLD_SHIFT
) | (0x1 << 26) | (0xA << 28),
3934 [TCTL_EXT
] = 0x40 | (0x42 << 10),
3935 [DTXCTL
] = E1000_DTXCTL_8023LL
| E1000_DTXCTL_SPOOF_INT
,
3936 [VET
] = ETH_P_VLAN
| (ETH_P_VLAN
<< 16),
3938 [V2PMAILBOX0
... V2PMAILBOX0
+ IGB_MAX_VF_FUNCTIONS
- 1] = E1000_V2PMAILBOX_RSTI
,
3942 [VMOLR0
... VMOLR0
+ 7] = 0x2600 | E1000_VMOLR_STRCRC
,
3943 [RPLOLR
] = E1000_RPLOLR_STRCRC
,
3945 [TXCTL0
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3946 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3947 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3948 [TXCTL1
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3949 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3950 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3951 [TXCTL2
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3952 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3953 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3954 [TXCTL3
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3955 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3956 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3957 [TXCTL4
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3958 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3959 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3960 [TXCTL5
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3961 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3962 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3963 [TXCTL6
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3964 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3965 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3966 [TXCTL7
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3967 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3968 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3969 [TXCTL8
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3970 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3971 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3972 [TXCTL9
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3973 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3974 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3975 [TXCTL10
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3976 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3977 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3978 [TXCTL11
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3979 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3980 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3981 [TXCTL12
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3982 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3983 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3984 [TXCTL13
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3985 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3986 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3987 [TXCTL14
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3988 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3989 E1000_DCA_TXCTRL_DESC_RRO_EN
,
3990 [TXCTL15
] = E1000_DCA_TXCTRL_DATA_RRO_EN
|
3991 E1000_DCA_TXCTRL_TX_WB_RO_EN
|
3992 E1000_DCA_TXCTRL_DESC_RRO_EN
,
/*
 * igb_reset() - reset the device core.
 * @sw: true for a software-initiated reset, false for a full reset
 *      (igb_core_reset passes false).
 *
 * Cancels any pending auto-negotiation, resets interrupt management,
 * reloads the PHY registers from igb_phy_reg_init and the MAC registers
 * from igb_mac_reg_init (registers beyond that table are zeroed),
 * restores the permanent MAC address and clears per-queue TX state.
 * NOTE(review): RXPBS/TXPBS and the EITR0..EITR0+IGB_INTR_NUM-1 window
 * are singled out in the loop condition together with @sw — presumably
 * preserved across software resets; the skipping lines fall outside the
 * visible code, confirm against the full source.
 */
3995 static void igb_reset(IGBCore
*core
, bool sw
)
4000 timer_del(core
->autoneg_timer
);
4002 igb_intrmgr_reset(core
);
/* PHY: zero everything, then apply the reset template. */
4004 memset(core
->phy
, 0, sizeof core
->phy
);
4005 memcpy(core
->phy
, igb_phy_reg_init
, sizeof igb_phy_reg_init
);
4007 for (i
= 0; i
< E1000E_MAC_SIZE
; i
++) {
4009 (i
== RXPBS
|| i
== TXPBS
||
4010 (i
>= EITR0
&& i
< EITR0
+ IGB_INTR_NUM
))) {
/* Registers not covered by the init table reset to zero. */
4014 core
->mac
[i
] = i
< ARRAY_SIZE(igb_mac_reg_init
) ?
4015 igb_mac_reg_init
[i
] : 0;
/* Re-assert link-down if the backend link is currently down. */
4018 if (qemu_get_queue(core
->owner_nic
)->link_down
) {
4019 igb_link_down(core
);
4022 e1000x_reset_mac_addr(core
->owner_nic
, core
->mac
, core
->permanent_mac
);
/* Drop any half-assembled TX packets and clear the skip flag. */
4024 for (i
= 0; i
< ARRAY_SIZE(core
->tx
); i
++) {
4026 net_tx_pkt_reset(tx
->tx_pkt
);
4033 tx
->skip_cp
= false;
/* Full (non-software) reset entry point exposed to the device model. */
4038 igb_core_reset(IGBCore
*core
)
4040 igb_reset(core
, false);
/*
 * igb_core_pre_save() - migration pre-save hook.
 * Forces any in-flight auto-negotiation to complete so the destination
 * can infer link state from MII_BMSR_AN_COMP, and flags TX queues that
 * still hold packet fragments (which cannot be migrated) via skip_cp.
 */
4043 void igb_core_pre_save(IGBCore
*core
)
4046 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
4049 * If link is down and auto-negotiation is supported and ongoing,
4050 * complete auto-negotiation immediately. This allows us to look
4051 * at MII_BMSR_AN_COMP to infer link status on load.
4053 if (nc
->link_down
&& igb_have_autoneg(core
)) {
4054 core
->phy
[MII_BMSR
] |= MII_BMSR_AN_COMP
;
4055 igb_update_flowctl_status(core
);
/*
 * A packet with fragments already gathered is lost across migration;
 * skip_cp makes the TX path discard the stale context after load.
 * NOTE(review): confirm skip_cp consumption against the TX path.
 */
4058 for (i
= 0; i
< ARRAY_SIZE(core
->tx
); i
++) {
4059 if (net_tx_pkt_has_fragments(core
->tx
[i
].tx_pkt
)) {
4060 core
->tx
[i
].skip_cp
= true;
4066 igb_core_post_load(IGBCore
*core
)
4068 NetClientState
*nc
= qemu_get_queue(core
->owner_nic
);
4071 * nc.link_down can't be migrated, so infer link_down according
4072 * to link status bit in core.mac[STATUS].
4074 nc
->link_down
= (core
->mac
[STATUS
] & E1000_STATUS_LU
) == 0;