// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/suspend.h>

#include "e1000.h"

char e1000e_driver_name[] = "e1000e";

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct e1000_info *e1000_info_tbl[] = {
    [board_82571]       = &e1000_82571_info,
    [board_82572]       = &e1000_82572_info,
    [board_82573]       = &e1000_82573_info,
    [board_82574]       = &e1000_82574_info,
    [board_82583]       = &e1000_82583_info,
    [board_80003es2lan] = &e1000_es2_info,
    [board_ich8lan]     = &e1000_ich8_info,
    [board_ich9lan]     = &e1000_ich9_info,
    [board_ich10lan]    = &e1000_ich10_info,
    [board_pchlan]      = &e1000_pch_info,
    [board_pch2lan]     = &e1000_pch2_info,
    [board_pch_lpt]     = &e1000_pch_lpt_info,
    [board_pch_spt]     = &e1000_pch_spt_info,
    [board_pch_cnp]     = &e1000_pch_cnp_info,
    [board_pch_tgp]     = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
    u32 ofs;
    char *name;
};

static const struct e1000_reg_info e1000_reg_info_tbl[] = {
    /* General Registers */
    {E1000_STATUS, "STATUS"},
    {E1000_CTRL_EXT, "CTRL_EXT"},

    /* Interrupt Registers */

    /* Rx Registers */
    {E1000_RDLEN(0), "RDLEN"},
    {E1000_RDH(0), "RDH"},
    {E1000_RDT(0), "RDT"},
    {E1000_RXDCTL(0), "RXDCTL"},
    {E1000_RDBAL(0), "RDBAL"},
    {E1000_RDBAH(0), "RDBAH"},
    {E1000_RDFHS, "RDFHS"},
    {E1000_RDFTS, "RDFTS"},
    {E1000_RDFPC, "RDFPC"},

    /* Tx Registers */
    {E1000_TDBAL(0), "TDBAL"},
    {E1000_TDBAH(0), "TDBAH"},
    {E1000_TDLEN(0), "TDLEN"},
    {E1000_TDH(0), "TDH"},
    {E1000_TDT(0), "TDT"},
    {E1000_TXDCTL(0), "TXDCTL"},
    {E1000_TARC(0), "TARC"},
    {E1000_TDFHS, "TDFHS"},
    {E1000_TDFTS, "TDFTS"},
    {E1000_TDFPC, "TDFPC"},

    /* List Terminator */
    {0, NULL}
};
/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time.  Normally, this is handled in
 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
 * accesses later than it should, which could result in the register having
 * an incorrect value.  Workaround this by checking the FWSM register which
 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
 * and try again a number of times.
 **/
static void __ew32_prepare(struct e1000_hw *hw)
{
    s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

    while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
        udelay(50);
}
void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
    if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
        __ew32_prepare(hw);

    writel(val, hw->hw_addr + reg);
}
/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
    int n = 0;
    char rname[16];
    u32 regs[8];

    switch (reginfo->ofs) {
    case E1000_RXDCTL(0):
        for (n = 0; n < 2; n++)
            regs[n] = __er32(hw, E1000_RXDCTL(n));
        break;
    case E1000_TXDCTL(0):
        for (n = 0; n < 2; n++)
            regs[n] = __er32(hw, E1000_TXDCTL(n));
        break;
    case E1000_TARC(0):
        for (n = 0; n < 2; n++)
            regs[n] = __er32(hw, E1000_TARC(n));
        break;
    default:
        pr_info("%-15s %08x\n",
            reginfo->name, __er32(hw, reginfo->ofs));
        return;
    }

    snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
    pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
                 struct e1000_buffer *bi)
{
    int i;
    struct e1000_ps_page *ps_page;

    for (i = 0; i < adapter->rx_ps_pages; i++) {
        ps_page = &bi->ps_pages[i];

        if (ps_page->page) {
            pr_info("packet dump for ps_page %d:\n", i);
            print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
                       16, 1, page_address(ps_page->page),
                       PAGE_SIZE, true);
        }
    }
}
/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct e1000_hw *hw = &adapter->hw;
    struct e1000_reg_info *reginfo;
    struct e1000_ring *tx_ring = adapter->tx_ring;
    struct e1000_tx_desc *tx_desc;
    struct my_u0 {
        __le64 a;
        __le64 b;
    } *u0;
    struct e1000_buffer *buffer_info;
    struct e1000_ring *rx_ring = adapter->rx_ring;
    union e1000_rx_desc_packet_split *rx_desc_ps;
    union e1000_rx_desc_extended *rx_desc;
    struct my_u1 {
        __le64 a;
        __le64 b;
        __le64 c;
        __le64 d;
    } *u1;
    u32 staterr;
    int i = 0;

    if (!netif_msg_hw(adapter))
        return;

    /* Print netdevice Info */
    if (netdev) {
        dev_info(&adapter->pdev->dev, "Net device Info\n");
        pr_info("Device Name     state            trans_start\n");
        pr_info("%-15s %016lX %016lX\n", netdev->name,
            netdev->state, dev_trans_start(netdev));
    }

    /* Print Registers */
    dev_info(&adapter->pdev->dev, "Register Dump\n");
    pr_info(" Register Name   Value\n");
    for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
         reginfo->name; reginfo++) {
        e1000_regdump(hw, reginfo);
    }

    /* Print Tx Ring Summary */
    if (!netdev || !netif_running(netdev))
        return;

    dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
    pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
    buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
    pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
        0, tx_ring->next_to_use, tx_ring->next_to_clean,
        (unsigned long long)buffer_info->dma,
        buffer_info->length,
        buffer_info->next_to_watch,
        (unsigned long long)buffer_info->time_stamp);

    /* Print Tx Ring */
    if (!netif_msg_tx_done(adapter))
        goto rx_ring_summary;

    dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

    /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
     *
     * Legacy Transmit Descriptor
     *   +--------------------------------------------------------------+
     * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
     *   +--------------------------------------------------------------+
     * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
     *   +--------------------------------------------------------------+
     *   63       48 47        36 35    32 31     24 23    16 15        0
     *
     * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
     *   63      48 47    40 39       32 31             16 15    8 7      0
     *   +----------------------------------------------------------------+
     * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
     *   +----------------------------------------------------------------+
     * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
     *   +----------------------------------------------------------------+
     *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
     *
     * Extended Data Descriptor (DTYP=0x1)
     *   +----------------------------------------------------------------+
     * 0 |                     Buffer Address [63:0]                      |
     *   +----------------------------------------------------------------+
     * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
     *   +----------------------------------------------------------------+
     *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
     */
    pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
    pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
    pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
    for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
        const char *next_desc;
        tx_desc = E1000_TX_DESC(*tx_ring, i);
        buffer_info = &tx_ring->buffer_info[i];
        u0 = (struct my_u0 *)tx_desc;
        if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
            next_desc = " NTC/U";
        else if (i == tx_ring->next_to_use)
            next_desc = " NTU";
        else if (i == tx_ring->next_to_clean)
            next_desc = " NTC";
        else
            next_desc = "";
        pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
            (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
             ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
            i,
            (unsigned long long)le64_to_cpu(u0->a),
            (unsigned long long)le64_to_cpu(u0->b),
            (unsigned long long)buffer_info->dma,
            buffer_info->length, buffer_info->next_to_watch,
            (unsigned long long)buffer_info->time_stamp,
            buffer_info->skb, next_desc);

        if (netif_msg_pktdata(adapter) && buffer_info->skb)
            print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
                       16, 1, buffer_info->skb->data,
                       buffer_info->skb->len, true);
    }

    /* Print Rx Ring Summary */
rx_ring_summary:
    dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
    pr_info("Queue [NTU] [NTC]\n");
    pr_info(" %5d %5X %5X\n",
        0, rx_ring->next_to_use, rx_ring->next_to_clean);

    /* Print Rx Ring */
    if (!netif_msg_rx_status(adapter))
        return;

    dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
    switch (adapter->rx_ps_pages) {
    case 1:
    case 2:
    case 3:
        /* [Extended] Packet Split Receive Descriptor Format
         *
         *    +-----------------------------------------------------+
         *  0 |                Buffer Address 0 [63:0]              |
         *    +-----------------------------------------------------+
         *  8 |                Buffer Address 1 [63:0]              |
         *    +-----------------------------------------------------+
         * 16 |                Buffer Address 2 [63:0]              |
         *    +-----------------------------------------------------+
         * 24 |                Buffer Address 3 [63:0]              |
         *    +-----------------------------------------------------+
         */
        pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
        /* [Extended] Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31     13 12    8 7    4 3        0
         *   +------------------------------------------------------+
         * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
         *   | Checksum | Ident  |         | Queue |      |  Type   |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         */
        pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
        for (i = 0; i < rx_ring->count; i++) {
            const char *next_desc;
            buffer_info = &rx_ring->buffer_info[i];
            rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
            u1 = (struct my_u1 *)rx_desc_ps;
            staterr =
                le32_to_cpu(rx_desc_ps->wb.middle.status_error);

            if (i == rx_ring->next_to_use)
                next_desc = " NTU";
            else if (i == rx_ring->next_to_clean)
                next_desc = " NTC";
            else
                next_desc = "";

            if (staterr & E1000_RXD_STAT_DD) {
                /* Descriptor Done */
                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
                    "RWB", i,
                    (unsigned long long)le64_to_cpu(u1->a),
                    (unsigned long long)le64_to_cpu(u1->b),
                    (unsigned long long)le64_to_cpu(u1->c),
                    (unsigned long long)le64_to_cpu(u1->d),
                    buffer_info->skb, next_desc);
            } else {
                pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
                    "R  ", i,
                    (unsigned long long)le64_to_cpu(u1->a),
                    (unsigned long long)le64_to_cpu(u1->b),
                    (unsigned long long)le64_to_cpu(u1->c),
                    (unsigned long long)le64_to_cpu(u1->d),
                    (unsigned long long)buffer_info->dma,
                    buffer_info->skb, next_desc);

                if (netif_msg_pktdata(adapter))
                    e1000e_dump_ps_pages(adapter,
                                 buffer_info);
            }
        }
        break;
    default:
        /* Extended Receive Descriptor (Read) Format
         *
         *   +-----------------------------------------------------+
         * 0 |                Buffer Address [63:0]                |
         *   +-----------------------------------------------------+
         * 8 |                      Reserved                       |
         *   +-----------------------------------------------------+
         */
        pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
        /* Extended Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31    24 23            4 3        0
         *   +------------------------------------------------------+
         *   |              RSS Hash              |      |          |
         * 0 +-------------------+  Rsvd  |    Reserved   | MRQ RSS |
         *   | Packet   | IP     |        |               |  Type   |
         *   | Checksum | Ident  |        |               |         |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         */
        pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

        for (i = 0; i < rx_ring->count; i++) {
            const char *next_desc;

            buffer_info = &rx_ring->buffer_info[i];
            rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
            u1 = (struct my_u1 *)rx_desc;
            staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

            if (i == rx_ring->next_to_use)
                next_desc = " NTU";
            else if (i == rx_ring->next_to_clean)
                next_desc = " NTC";
            else
                next_desc = "";

            if (staterr & E1000_RXD_STAT_DD) {
                /* Descriptor Done */
                pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
                    "RWB", i,
                    (unsigned long long)le64_to_cpu(u1->a),
                    (unsigned long long)le64_to_cpu(u1->b),
                    buffer_info->skb, next_desc);
            } else {
                pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
                    "R  ", i,
                    (unsigned long long)le64_to_cpu(u1->a),
                    (unsigned long long)le64_to_cpu(u1->b),
                    (unsigned long long)buffer_info->dma,
                    buffer_info->skb, next_desc);

                if (netif_msg_pktdata(adapter) &&
                    buffer_info->skb)
                    print_hex_dump(KERN_INFO, "",
                               DUMP_PREFIX_ADDRESS, 16,
                               1,
                               buffer_info->skb->data,
                               adapter->rx_buffer_len,
                               true);
            }
        }
    }
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 * @ring: pointer to ring struct to perform calculation on
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
    if (ring->next_to_clean > ring->next_to_use)
        return ring->next_to_clean - ring->next_to_use - 1;

    return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
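
/* Illustrative example (hypothetical values, not taken from the driver):
 * with a ring of count = 256, next_to_use = 10 and next_to_clean = 200,
 * the first branch applies and 200 - 10 - 1 = 189 descriptors are free;
 * with next_to_use = 200 and next_to_clean = 10 the ring has wrapped, so
 * the second branch gives 256 + 10 - 200 - 1 = 65.  One slot is always
 * left unused so that a completely full ring can be distinguished from
 * an empty one.
 */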
/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value.  This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers.  The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
                      struct skb_shared_hwtstamps *hwtstamps,
                      u64 systim)
{
    u64 ns;
    unsigned long flags;

    spin_lock_irqsave(&adapter->systim_lock, flags);
    ns = timecounter_cyc2time(&adapter->tc, systim);
    spin_unlock_irqrestore(&adapter->systim_lock, flags);

    memset(hwtstamps, 0, sizeof(*hwtstamps));
    hwtstamps->hwtstamp = ns_to_ktime(ns);
}
/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
                   struct sk_buff *skb)
{
    struct e1000_hw *hw = &adapter->hw;
    u64 rxstmp;

    if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
        !(status & E1000_RXDEXT_STATERR_TST) ||
        !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
        return;

    /* The Rx time stamp registers contain the time stamp.  No other
     * received packet will be time stamped until the Rx time stamp
     * registers are read.  Because only one packet can be time stamped
     * at a time, the register values must belong to this packet and
     * therefore none of the other additional attributes need to be
     * saved for it.
     */
    rxstmp = (u64)er32(RXSTMPL);
    rxstmp |= (u64)er32(RXSTMPH) << 32;
    e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

    adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
                  struct net_device *netdev, struct sk_buff *skb,
                  u32 staterr, __le16 vlan)
{
    u16 tag = le16_to_cpu(vlan);

    e1000e_rx_hwtstamp(adapter, staterr, skb);

    skb->protocol = eth_type_trans(skb, netdev);

    if (staterr & E1000_RXD_STAT_VP)
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

    napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
                  struct sk_buff *skb)
{
    u16 status = (u16)status_err;
    u8 errors = (u8)(status_err >> 24);

    skb_checksum_none_assert(skb);

    /* Rx checksum disabled */
    if (!(adapter->netdev->features & NETIF_F_RXCSUM))
        return;

    /* Ignore Checksum bit is set */
    if (status & E1000_RXD_STAT_IXSM)
        return;

    /* TCP/UDP checksum error bit or IP checksum error bit is set */
    if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
        /* let the stack verify checksum errors */
        adapter->hw_csum_err++;
        return;
    }

    /* TCP/UDP Checksum has not been calculated */
    if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
        return;

    /* It must be a TCP or UDP packet with a valid checksum */
    skb->ip_summed = CHECKSUM_UNNECESSARY;
    adapter->hw_csum_good++;
}
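
/* Illustrative note (hypothetical value, for illustration only): the
 * hardware packs the descriptor status in the low 16 bits of status_err
 * and the error flags in bits 31:24, so for an assumed status_err of
 * 0x20000021 the casts above yield status = 0x0021 and errors = 0x20,
 * which are then compared against the E1000_RXD_STAT_* and
 * E1000_RXD_ERR_* masks.
 */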
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct e1000_hw *hw = &adapter->hw;

    __ew32_prepare(hw);
    writel(i, rx_ring->tail);

    if (unlikely(i != readl(rx_ring->tail))) {
        u32 rctl = er32(RCTL);

        ew32(RCTL, rctl & ~E1000_RCTL_EN);
        e_err("ME firmware caused invalid RDT - resetting\n");
        schedule_work(&adapter->reset_task);
    }
}
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
    struct e1000_adapter *adapter = tx_ring->adapter;
    struct e1000_hw *hw = &adapter->hw;

    __ew32_prepare(hw);
    writel(i, tx_ring->tail);

    if (unlikely(i != readl(tx_ring->tail))) {
        u32 tctl = er32(TCTL);

        ew32(TCTL, tctl & ~E1000_TCTL_EN);
        e_err("ME firmware caused invalid TDT - resetting\n");
        schedule_work(&adapter->reset_task);
    }
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
                   int cleaned_count, gfp_t gfp)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    union e1000_rx_desc_extended *rx_desc;
    struct e1000_buffer *buffer_info;
    struct sk_buff *skb;
    unsigned int i;
    unsigned int bufsz = adapter->rx_buffer_len;

    i = rx_ring->next_to_use;
    buffer_info = &rx_ring->buffer_info[i];

    while (cleaned_count--) {
        skb = buffer_info->skb;
        if (skb) {
            skb_trim(skb, 0);
            goto map_skb;
        }

        skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
        if (!skb) {
            /* Better luck next round */
            adapter->alloc_rx_buff_failed++;
            break;
        }

        buffer_info->skb = skb;
map_skb:
        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                          adapter->rx_buffer_len,
                          DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
            dev_err(&pdev->dev, "Rx DMA map failed\n");
            adapter->rx_dma_failed++;
            break;
        }

        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
        rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

        if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
            /* Force memory writes to complete before letting h/w
             * know there are new descriptors to fetch.  (Only
             * applicable for weak-ordered memory model archs,
             * such as IA-64).
             */
            wmb();
            if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                e1000e_update_rdt_wa(rx_ring, i);
            else
                writel(i, rx_ring->tail);
        }
        i++;
        if (i == rx_ring->count)
            i = 0;
        buffer_info = &rx_ring->buffer_info[i];
    }

    rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number to reallocate
 * @gfp: flags for allocation
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
                      int cleaned_count, gfp_t gfp)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    union e1000_rx_desc_packet_split *rx_desc;
    struct e1000_buffer *buffer_info;
    struct e1000_ps_page *ps_page;
    struct sk_buff *skb;
    unsigned int i, j;

    i = rx_ring->next_to_use;
    buffer_info = &rx_ring->buffer_info[i];

    while (cleaned_count--) {
        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

        for (j = 0; j < PS_PAGE_BUFFERS; j++) {
            ps_page = &buffer_info->ps_pages[j];
            if (j >= adapter->rx_ps_pages) {
                /* all unused desc entries get hw null ptr */
                rx_desc->read.buffer_addr[j + 1] =
                    ~cpu_to_le64(0);
                continue;
            }
            if (!ps_page->page) {
                ps_page->page = alloc_page(gfp);
                if (!ps_page->page) {
                    adapter->alloc_rx_buff_failed++;
                    goto no_buffers;
                }
                ps_page->dma = dma_map_page(&pdev->dev,
                                ps_page->page,
                                0, PAGE_SIZE,
                                DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev,
                              ps_page->dma)) {
                    dev_err(&adapter->pdev->dev,
                        "Rx DMA page map failed\n");
                    adapter->rx_dma_failed++;
                    goto no_buffers;
                }
            }
            /* Refresh the desc even if buffer_addrs
             * didn't change because each write-back
             * erases this info.
             */
            rx_desc->read.buffer_addr[j + 1] =
                cpu_to_le64(ps_page->dma);
        }

        skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
                          gfp);

        if (!skb) {
            adapter->alloc_rx_buff_failed++;
            break;
        }

        buffer_info->skb = skb;
        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                          adapter->rx_ps_bsize0,
                          DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
            dev_err(&pdev->dev, "Rx DMA map failed\n");
            adapter->rx_dma_failed++;
            /* cleanup skb */
            dev_kfree_skb_any(skb);
            buffer_info->skb = NULL;
            break;
        }

        rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

        if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
            /* Force memory writes to complete before letting h/w
             * know there are new descriptors to fetch.  (Only
             * applicable for weak-ordered memory model archs,
             * such as IA-64).
             */
            wmb();
            if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
                e1000e_update_rdt_wa(rx_ring, i << 1);
            else
                writel(i << 1, rx_ring->tail);
        }

        i++;
        if (i == rx_ring->count)
            i = 0;
        buffer_info = &rx_ring->buffer_info[i];
    }

no_buffers:
    rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 * @gfp: flags for allocation
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
                     int cleaned_count, gfp_t gfp)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    union e1000_rx_desc_extended *rx_desc;
    struct e1000_buffer *buffer_info;
    struct sk_buff *skb;
    unsigned int i;
    unsigned int bufsz = 256 - 16;  /* for skb_reserve */

    i = rx_ring->next_to_use;
    buffer_info = &rx_ring->buffer_info[i];

    while (cleaned_count--) {
        skb = buffer_info->skb;
        if (skb) {
            skb_trim(skb, 0);
            goto check_page;
        }

        skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
        if (unlikely(!skb)) {
            /* Better luck next round */
            adapter->alloc_rx_buff_failed++;
            break;
        }

        buffer_info->skb = skb;
check_page:
        /* allocate a new page if necessary */
        if (!buffer_info->page) {
            buffer_info->page = alloc_page(gfp);
            if (unlikely(!buffer_info->page)) {
                adapter->alloc_rx_buff_failed++;
                break;
            }
        }

        if (!buffer_info->dma) {
            buffer_info->dma = dma_map_page(&pdev->dev,
                            buffer_info->page, 0,
                            PAGE_SIZE,
                            DMA_FROM_DEVICE);
            if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                adapter->alloc_rx_buff_failed++;
                break;
            }
        }

        rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
        rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

        if (unlikely(++i == rx_ring->count))
            i = 0;
        buffer_info = &rx_ring->buffer_info[i];
    }

    if (likely(rx_ring->next_to_use != i)) {
        rx_ring->next_to_use = i;
        if (unlikely(i-- == 0))
            i = (rx_ring->count - 1);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
            e1000e_update_rdt_wa(rx_ring, i);
        else
            writel(i, rx_ring->tail);
    }
}
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
                 struct sk_buff *skb)
{
    if (netdev->features & NETIF_F_RXHASH)
        skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                   int work_to_do)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    struct e1000_hw *hw = &adapter->hw;
    union e1000_rx_desc_extended *rx_desc, *next_rxd;
    struct e1000_buffer *buffer_info, *next_buffer;
    u32 length, staterr;
    unsigned int i;
    int cleaned_count = 0;
    bool cleaned = false;
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    buffer_info = &rx_ring->buffer_info[i];

    while (staterr & E1000_RXD_STAT_DD) {
        struct sk_buff *skb;

        if (*work_done >= work_to_do)
            break;
        (*work_done)++;
        dma_rmb();  /* read descriptor and rx_buffer_info after status DD */

        skb = buffer_info->skb;
        buffer_info->skb = NULL;

        prefetch(skb->data - NET_IP_ALIGN);

        i++;
        if (i == rx_ring->count)
            i = 0;
        next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
        prefetch(next_rxd);

        next_buffer = &rx_ring->buffer_info[i];

        cleaned = true;
        cleaned_count++;
        dma_unmap_single(&pdev->dev, buffer_info->dma,
                 adapter->rx_buffer_len, DMA_FROM_DEVICE);
        buffer_info->dma = 0;

        length = le16_to_cpu(rx_desc->wb.upper.length);

        /* !EOP means multiple descriptors were used to store a single
         * packet, if that's the case we need to toss it.  In fact, we
         * need to toss every packet with the EOP bit clear and the
         * next frame that _does_ have the EOP bit set, as it is by
         * definition only a frame fragment
         */
        if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
            adapter->flags2 |= FLAG2_IS_DISCARDING;

        if (adapter->flags2 & FLAG2_IS_DISCARDING) {
            /* All receives must fit into a single buffer */
            e_dbg("Receive packet consumed multiple buffers\n");
            /* recycle */
            buffer_info->skb = skb;
            if (staterr & E1000_RXD_STAT_EOP)
                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
            goto next_desc;
        }

        if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
                 !(netdev->features & NETIF_F_RXALL))) {
            /* recycle */
            buffer_info->skb = skb;
            goto next_desc;
        }

        /* adjust length to remove Ethernet CRC */
        if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
            /* If configured to store CRC, don't subtract FCS,
             * but keep the FCS bytes out of the total_rx_bytes
             * counter
             */
            if (netdev->features & NETIF_F_RXFCS)
                total_rx_bytes -= 4;
            else
                length -= 4;
        }

        total_rx_bytes += length;
        total_rx_packets++;

        /* code added for copybreak, this should improve
         * performance for small packets with large amounts
         * of reassembly being done in the stack
         */
        if (length < copybreak) {
            struct sk_buff *new_skb =
                napi_alloc_skb(&adapter->napi, length);
            if (new_skb) {
                skb_copy_to_linear_data_offset(new_skb,
                                   -NET_IP_ALIGN,
                                   (skb->data -
                                    NET_IP_ALIGN),
                                   (length +
                                    NET_IP_ALIGN));
                /* save the skb in buffer_info as good */
                buffer_info->skb = skb;
                skb = new_skb;
            }
            /* else just continue with the old one */
        }
        /* end copybreak code */
        skb_put(skb, length);

        /* Receive Checksum Offload */
        e1000_rx_checksum(adapter, staterr, skb);

        e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

        e1000_receive_skb(adapter, netdev, skb, staterr,
                  rx_desc->wb.upper.vlan);

next_desc:
        rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
            adapter->alloc_rx_buf(rx_ring, cleaned_count,
                          GFP_ATOMIC);
            cleaned_count = 0;
        }

        /* use prefetched values */
        rx_desc = next_rxd;
        buffer_info = next_buffer;

        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    }
    rx_ring->next_to_clean = i;

    cleaned_count = e1000_desc_unused(rx_ring);
    if (cleaned_count)
        adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

    adapter->total_rx_bytes += total_rx_bytes;
    adapter->total_rx_packets += total_rx_packets;
    return cleaned;
}
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
                struct e1000_buffer *buffer_info,
                bool drop)
{
    struct e1000_adapter *adapter = tx_ring->adapter;

    if (buffer_info->dma) {
        if (buffer_info->mapped_as_page)
            dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
                       buffer_info->length, DMA_TO_DEVICE);
        else
            dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
                     buffer_info->length, DMA_TO_DEVICE);
        buffer_info->dma = 0;
    }
    if (buffer_info->skb) {
        if (drop)
            dev_kfree_skb_any(buffer_info->skb);
        else
            dev_consume_skb_any(buffer_info->skb);
        buffer_info->skb = NULL;
    }
    buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
    struct e1000_adapter *adapter = container_of(work,
                             struct e1000_adapter,
                             print_hang_task);
    struct net_device *netdev = adapter->netdev;
    struct e1000_ring *tx_ring = adapter->tx_ring;
    unsigned int i = tx_ring->next_to_clean;
    unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
    struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
    struct e1000_hw *hw = &adapter->hw;
    u16 phy_status, phy_1000t_status, phy_ext_status;
    u16 pci_status;

    if (test_bit(__E1000_DOWN, &adapter->state))
        return;

    if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
        /* May be block on write-back, flush and detect again
         * flush pending descriptor writebacks to memory
         */
        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
        /* execute the writes immediately */
        e1e_flush();
        /* Due to rare timing issues, write to TIDV again to ensure
         * the write is successful
         */
        ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
        /* execute the writes immediately */
        e1e_flush();

        adapter->tx_hang_recheck = true;
        return;
    }
    adapter->tx_hang_recheck = false;

    if (er32(TDH(0)) == er32(TDT(0))) {
        e_dbg("false hang detected, ignoring\n");
        return;
    }

    /* Real hang detected */
    netif_stop_queue(netdev);

    e1e_rphy(hw, MII_BMSR, &phy_status);
    e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
    e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

    pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

    /* detected Hardware unit hang */
    e_err("Detected Hardware Unit Hang:\n"
          "  TDH                  <%x>\n"
          "  TDT                  <%x>\n"
          "  next_to_use          <%x>\n"
          "  next_to_clean        <%x>\n"
          "buffer_info[next_to_clean]:\n"
          "  time_stamp           <%lx>\n"
          "  next_to_watch        <%x>\n"
          "  jiffies              <%lx>\n"
          "  next_to_watch.status <%x>\n"
          "MAC Status             <%x>\n"
          "PHY Status             <%x>\n"
          "PHY 1000BASE-T Status  <%x>\n"
          "PHY Extended Status    <%x>\n"
          "PCI Status             <%x>\n",
          readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
          tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
          eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
          phy_status, phy_1000t_status, phy_ext_status, pci_status);

    e1000e_dump(adapter);

    /* Suggest workaround for known h/w issue */
    if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
        e_err("Try turning off Tx pause (flow control) via ethtool\n");
}
/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 */
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
    struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
                             tx_hwtstamp_work);
    struct e1000_hw *hw = &adapter->hw;

    if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
        struct sk_buff *skb = adapter->tx_hwtstamp_skb;
        struct skb_shared_hwtstamps shhwtstamps;
        u64 txstmp;

        txstmp = er32(TXSTMPL);
        txstmp |= (u64)er32(TXSTMPH) << 32;

        e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

        /* Clear the global tx_hwtstamp_skb pointer and force writes
         * prior to notifying the stack of a Tx timestamp.
         */
        adapter->tx_hwtstamp_skb = NULL;
        wmb();  /* force write prior to skb_tstamp_tx */

        skb_tstamp_tx(skb, &shhwtstamps);
        dev_consume_skb_any(skb);
    } else if (time_after(jiffies, adapter->tx_hwtstamp_start
                  + adapter->tx_timeout_factor * HZ)) {
        dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
        adapter->tx_hwtstamp_skb = NULL;
        adapter->tx_hwtstamp_timeouts++;
        e_warn("clearing Tx timestamp hang\n");
    } else {
        /* reschedule to check later */
        schedule_work(&adapter->tx_hwtstamp_work);
    }
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
    struct e1000_adapter *adapter = tx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct e1000_hw *hw = &adapter->hw;
    struct e1000_tx_desc *tx_desc, *eop_desc;
    struct e1000_buffer *buffer_info;
    unsigned int i, eop;
    unsigned int count = 0;
    unsigned int total_tx_bytes = 0, total_tx_packets = 0;
    unsigned int bytes_compl = 0, pkts_compl = 0;

    i = tx_ring->next_to_clean;
    eop = tx_ring->buffer_info[i].next_to_watch;
    eop_desc = E1000_TX_DESC(*tx_ring, eop);

    while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
           (count < tx_ring->count)) {
        bool cleaned = false;

        dma_rmb();  /* read buffer_info after eop_desc */
        for (; !cleaned; count++) {
            tx_desc = E1000_TX_DESC(*tx_ring, i);
            buffer_info = &tx_ring->buffer_info[i];
            cleaned = (i == eop);

            if (cleaned) {
                total_tx_packets += buffer_info->segs;
                total_tx_bytes += buffer_info->bytecount;
                if (buffer_info->skb) {
                    bytes_compl += buffer_info->skb->len;
                    pkts_compl++;
                }
            }

            e1000_put_txbuf(tx_ring, buffer_info, false);
            tx_desc->upper.data = 0;

            i++;
            if (i == tx_ring->count)
                i = 0;
        }

        if (i == tx_ring->next_to_use)
            break;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

    netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
    if (count && netif_carrier_ok(netdev) &&
        e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
        smp_mb();

        if (netif_queue_stopped(netdev) &&
            !(test_bit(__E1000_DOWN, &adapter->state))) {
            netif_wake_queue(netdev);
            ++adapter->restart_queue;
        }
    }

    if (adapter->detect_tx_hung) {
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i
         */
        adapter->detect_tx_hung = false;
        if (tx_ring->buffer_info[i].time_stamp &&
            time_after(jiffies, tx_ring->buffer_info[i].time_stamp
                   + (adapter->tx_timeout_factor * HZ)) &&
            !(er32(STATUS) & E1000_STATUS_TXOFF))
            schedule_work(&adapter->print_hang_task);
        else
            adapter->tx_hang_recheck = false;
    }
    adapter->total_tx_bytes += total_tx_bytes;
    adapter->total_tx_packets += total_tx_packets;
    return count < tx_ring->count;
}
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
                  int work_to_do)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct e1000_hw *hw = &adapter->hw;
    union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    struct e1000_buffer *buffer_info, *next_buffer;
    struct e1000_ps_page *ps_page;
    struct sk_buff *skb;
    unsigned int i, j;
    u32 length, staterr;
    int cleaned_count = 0;
    bool cleaned = false;
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
    staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
    buffer_info = &rx_ring->buffer_info[i];

    while (staterr & E1000_RXD_STAT_DD) {
        if (*work_done >= work_to_do)
            break;
        (*work_done)++;
        skb = buffer_info->skb;
        dma_rmb();  /* read descriptor and rx_buffer_info after status DD */

        /* in the packet split case this is header only */
        prefetch(skb->data - NET_IP_ALIGN);

        i++;
        if (i == rx_ring->count)
            i = 0;
        next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
        prefetch(next_rxd);

        next_buffer = &rx_ring->buffer_info[i];

        cleaned = true;
        cleaned_count++;
        dma_unmap_single(&pdev->dev, buffer_info->dma,
                 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
        buffer_info->dma = 0;

        /* see !EOP comment in other Rx routine */
        if (!(staterr & E1000_RXD_STAT_EOP))
            adapter->flags2 |= FLAG2_IS_DISCARDING;

        if (adapter->flags2 & FLAG2_IS_DISCARDING) {
            e_dbg("Packet Split buffers didn't pick up the full packet\n");
            dev_kfree_skb_irq(skb);
            if (staterr & E1000_RXD_STAT_EOP)
                adapter->flags2 &= ~FLAG2_IS_DISCARDING;
            goto next_desc;
        }

        if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
                 !(netdev->features & NETIF_F_RXALL))) {
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        length = le16_to_cpu(rx_desc->wb.middle.length0);

        if (!length) {
            e_dbg("Last part of the packet spanning multiple descriptors\n");
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        /* Good Receive */
        skb_put(skb, length);

        {
            /* this looks ugly, but it seems compiler issues make
             * it more efficient than reusing j
             */
            int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

            /* page alloc/put takes too long and effects small
             * packet throughput, so unsplit small packets and
             * save the alloc/put only valid in softirq (napi)
             * context to call kmap_*
             */
            if (l1 && (l1 <= copybreak) &&
                ((length + l1) <= adapter->rx_ps_bsize0)) {
                u8 *vaddr;

                ps_page = &buffer_info->ps_pages[0];

                /* there is no documentation about how to call
                 * kmap_atomic, so we can't hold the mapping
                 * very long
                 */
                dma_sync_single_for_cpu(&pdev->dev,
                            ps_page->dma,
                            PAGE_SIZE,
                            DMA_FROM_DEVICE);
                vaddr = kmap_atomic(ps_page->page);
                memcpy(skb_tail_pointer(skb), vaddr, l1);
                kunmap_atomic(vaddr);
                dma_sync_single_for_device(&pdev->dev,
                               ps_page->dma,
                               PAGE_SIZE,
                               DMA_FROM_DEVICE);

                /* remove the CRC */
                if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
                    if (!(netdev->features & NETIF_F_RXFCS))
                        l1 -= 4;
                }

                skb_put(skb, l1);
                goto copydone;
            }   /* if */
        }

        for (j = 0; j < PS_PAGE_BUFFERS; j++) {
            length = le16_to_cpu(rx_desc->wb.upper.length[j]);
            if (!length)
                break;

            ps_page = &buffer_info->ps_pages[j];
            dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
                       DMA_FROM_DEVICE);
            ps_page->dma = 0;
            skb_fill_page_desc(skb, j, ps_page->page, 0, length);
            ps_page->page = NULL;
            skb->len += length;
            skb->data_len += length;
            skb->truesize += PAGE_SIZE;
        }

        /* strip the ethernet crc, problem is we're using pages now so
         * this whole operation can get a little cpu intensive
         */
        if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
            if (!(netdev->features & NETIF_F_RXFCS))
                pskb_trim(skb, skb->len - 4);
        }

copydone:
        total_rx_bytes += skb->len;
        total_rx_packets++;

        e1000_rx_checksum(adapter, staterr, skb);

        e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

        if (rx_desc->wb.upper.header_status &
            cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
            adapter->rx_hdr_split++;

        e1000_receive_skb(adapter, netdev, skb, staterr,
                  rx_desc->wb.middle.vlan);

next_desc:
        rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
        buffer_info->skb = NULL;

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
            adapter->alloc_rx_buf(rx_ring, cleaned_count,
                          GFP_ATOMIC);
            cleaned_count = 0;
        }

        /* use prefetched values */
        rx_desc = next_rxd;
        buffer_info = next_buffer;

        staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
    }
    rx_ring->next_to_clean = i;

    cleaned_count = e1000_desc_unused(rx_ring);
    if (cleaned_count)
        adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

    adapter->total_rx_bytes += total_rx_bytes;
    adapter->total_rx_packets += total_rx_packets;
    return cleaned;
}
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
                   u16 length)
{
    bi->page = NULL;
    skb->len += length;
    skb->data_len += length;
    skb->truesize += PAGE_SIZE;
}
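
/* Illustrative note (hypothetical values, for illustration only): after a
 * page fragment of, say, 1400 bytes is attached, skb->len and
 * skb->data_len both grow by 1400 while skb->truesize grows by the full
 * PAGE_SIZE, since the whole page is now owned by the skb regardless of
 * how much of it the packet actually uses.
 */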
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 * @work_done: output parameter for indicating completed work
 * @work_to_do: how many packets we can clean
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
                     int work_to_do)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    union e1000_rx_desc_extended *rx_desc, *next_rxd;
    struct e1000_buffer *buffer_info, *next_buffer;
    u32 length, staterr;
    unsigned int i;
    int cleaned_count = 0;
    bool cleaned = false;
    unsigned int total_rx_bytes = 0, total_rx_packets = 0;
    struct skb_shared_info *shinfo;

    i = rx_ring->next_to_clean;
    rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
    staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    buffer_info = &rx_ring->buffer_info[i];

    while (staterr & E1000_RXD_STAT_DD) {
        struct sk_buff *skb;

        if (*work_done >= work_to_do)
            break;
        (*work_done)++;
        dma_rmb();  /* read descriptor and rx_buffer_info after status DD */

        skb = buffer_info->skb;
        buffer_info->skb = NULL;

        i++;
        if (i == rx_ring->count)
            i = 0;
        next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
        prefetch(next_rxd);

        next_buffer = &rx_ring->buffer_info[i];

        cleaned = true;
        cleaned_count++;
        dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
                   DMA_FROM_DEVICE);
        buffer_info->dma = 0;

        length = le16_to_cpu(rx_desc->wb.upper.length);

        /* errors is only valid for DD + EOP descriptors */
        if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
                 ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
                  !(netdev->features & NETIF_F_RXALL)))) {
            /* recycle both page and skb */
            buffer_info->skb = skb;
            /* an error means any chain goes out the window too */
            if (rx_ring->rx_skb_top)
                dev_kfree_skb_irq(rx_ring->rx_skb_top);
            rx_ring->rx_skb_top = NULL;
            goto next_desc;
        }
#define rxtop (rx_ring->rx_skb_top)
        if (!(staterr & E1000_RXD_STAT_EOP)) {
            /* this descriptor is only the beginning (or middle) */
            if (!rxtop) {
                /* this is the beginning of a chain */
                rxtop = skb;
                skb_fill_page_desc(rxtop, 0, buffer_info->page,
                           0, length);
            } else {
                /* this is the middle of a chain */
                shinfo = skb_shinfo(rxtop);
                skb_fill_page_desc(rxtop, shinfo->nr_frags,
                           buffer_info->page, 0,
                           length);
                /* re-use the skb, only consumed the page */
                buffer_info->skb = skb;
            }
            e1000_consume_page(buffer_info, rxtop, length);
            goto next_desc;
        } else {
            if (rxtop) {
                /* end of the chain */
                shinfo = skb_shinfo(rxtop);
                skb_fill_page_desc(rxtop, shinfo->nr_frags,
                           buffer_info->page, 0,
                           length);
                /* re-use the current skb, we only consumed the
                 * page
                 */
                buffer_info->skb = skb;
                skb = rxtop;
                rxtop = NULL;
                e1000_consume_page(buffer_info, skb, length);
            } else {
                /* no chain, got EOP, this buf is the packet
                 * copybreak to save the put_page/alloc_page
                 */
                if (length <= copybreak &&
                    skb_tailroom(skb) >= length) {
                    u8 *vaddr;
                    vaddr = kmap_atomic(buffer_info->page);
                    memcpy(skb_tail_pointer(skb), vaddr,
                           length);
                    kunmap_atomic(vaddr);
                    /* re-use the page, so don't erase
                     * buffer_info->page
                     */
                    skb_put(skb, length);
                } else {
                    skb_fill_page_desc(skb, 0,
                               buffer_info->page, 0,
                               length);
                    e1000_consume_page(buffer_info, skb,
                               length);
                }
            }
        }

        /* Receive Checksum Offload */
        e1000_rx_checksum(adapter, staterr, skb);

        e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

        /* probably a little skewed due to removing CRC */
        total_rx_bytes += skb->len;
        total_rx_packets++;

        /* eth type trans needs skb->data to point to something */
        if (!pskb_may_pull(skb, ETH_HLEN)) {
            e_err("pskb_may_pull failed.\n");
            dev_kfree_skb_irq(skb);
            goto next_desc;
        }

        e1000_receive_skb(adapter, netdev, skb, staterr,
                  rx_desc->wb.upper.vlan);

next_desc:
        rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

        /* return some buffers to hardware, one at a time is too slow */
        if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
            adapter->alloc_rx_buf(rx_ring, cleaned_count,
                          GFP_ATOMIC);
            cleaned_count = 0;
        }

        /* use prefetched values */
        rx_desc = next_rxd;
        buffer_info = next_buffer;

        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
    }
    rx_ring->next_to_clean = i;

    cleaned_count = e1000_desc_unused(rx_ring);
    if (cleaned_count)
        adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

    adapter->total_rx_bytes += total_rx_bytes;
    adapter->total_rx_packets += total_rx_packets;
    return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
    struct e1000_adapter *adapter = rx_ring->adapter;
    struct e1000_buffer *buffer_info;
    struct e1000_ps_page *ps_page;
    struct pci_dev *pdev = adapter->pdev;
    unsigned int i, j;

    /* Free all the Rx ring sk_buffs */
    for (i = 0; i < rx_ring->count; i++) {
        buffer_info = &rx_ring->buffer_info[i];
        if (buffer_info->dma) {
            if (adapter->clean_rx == e1000_clean_rx_irq)
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                         adapter->rx_buffer_len,
                         DMA_FROM_DEVICE);
            else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
                dma_unmap_page(&pdev->dev, buffer_info->dma,
                           PAGE_SIZE, DMA_FROM_DEVICE);
            else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
                dma_unmap_single(&pdev->dev, buffer_info->dma,
                         adapter->rx_ps_bsize0,
                         DMA_FROM_DEVICE);
            buffer_info->dma = 0;
        }

        if (buffer_info->page) {
            put_page(buffer_info->page);
            buffer_info->page = NULL;
        }

        if (buffer_info->skb) {
            dev_kfree_skb(buffer_info->skb);
            buffer_info->skb = NULL;
        }

        for (j = 0; j < PS_PAGE_BUFFERS; j++) {
            ps_page = &buffer_info->ps_pages[j];
            if (!ps_page->page)
                break;
            dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
                       DMA_FROM_DEVICE);
            ps_page->dma = 0;
            put_page(ps_page->page);
            ps_page->page = NULL;
        }
    }

    /* there also may be some cached data from a chained receive */
    if (rx_ring->rx_skb_top) {
        dev_kfree_skb(rx_ring->rx_skb_top);
        rx_ring->rx_skb_top = NULL;
    }

    /* Zero out the descriptor ring */
    memset(rx_ring->desc, 0, rx_ring->size);

    rx_ring->next_to_clean = 0;
    rx_ring->next_to_use = 0;
    adapter->flags2 &= ~FLAG2_IS_DISCARDING;
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
    struct e1000_adapter *adapter = container_of(work,
                             struct e1000_adapter,
                             downshift_task);

    if (test_bit(__E1000_DOWN, &adapter->state))
        return;

    e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
    struct net_device *netdev = data;
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 icr = er32(ICR);

    /* read ICR disables interrupts using IAM */
    if (icr & E1000_ICR_LSC) {
        hw->mac.get_link_status = true;
        /* ICH8 workaround-- Call gig speed drop workaround on cable
         * disconnect (LSC) before accessing any PHY registers
         */
        if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
            (!(er32(STATUS) & E1000_STATUS_LU)))
            schedule_work(&adapter->downshift_task);

        /* 80003ES2LAN workaround-- For packet buffer work-around on
         * link down event; disable receives here in the ISR and reset
         * adapter in watchdog
         */
        if (netif_carrier_ok(netdev) &&
            adapter->flags & FLAG_RX_NEEDS_RESTART) {
            /* disable receives */
            u32 rctl = er32(RCTL);

            ew32(RCTL, rctl & ~E1000_RCTL_EN);
            adapter->flags |= FLAG_RESTART_NOW;
        }
        /* guard against interrupt when we're going down */
        if (!test_bit(__E1000_DOWN, &adapter->state))
            mod_timer(&adapter->watchdog_timer, jiffies + 1);
    }

    /* Reset on uncorrectable ECC error */
    if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
        u32 pbeccsts = er32(PBECCSTS);

        adapter->corr_errors +=
            pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
        adapter->uncorr_errors +=
            (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
            E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);

        /* return immediately since reset is imminent */
        return IRQ_HANDLED;
    }

    if (napi_schedule_prep(&adapter->napi)) {
        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;
        adapter->total_rx_bytes = 0;
        adapter->total_rx_packets = 0;
        __napi_schedule(&adapter->napi);
    }

    return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
    struct net_device *netdev = data;
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 rctl, icr = er32(ICR);

    if (!icr || test_bit(__E1000_DOWN, &adapter->state))
        return IRQ_NONE;    /* Not our interrupt */

    /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
     * not set, then the adapter didn't send an interrupt
     */
    if (!(icr & E1000_ICR_INT_ASSERTED))
        return IRQ_NONE;

    /* Interrupt Auto-Mask...upon reading ICR,
     * interrupts are masked.  No need for the
     * IMC write
     */

    if (icr & E1000_ICR_LSC) {
        hw->mac.get_link_status = true;
        /* ICH8 workaround-- Call gig speed drop workaround on cable
         * disconnect (LSC) before accessing any PHY registers
         */
        if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
            (!(er32(STATUS) & E1000_STATUS_LU)))
            schedule_work(&adapter->downshift_task);

        /* 80003ES2LAN workaround--
         * For packet buffer work-around on link down event;
         * disable receives here in the ISR and
         * reset adapter in watchdog
         */
        if (netif_carrier_ok(netdev) &&
            (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
            /* disable receives */
            rctl = er32(RCTL);
            ew32(RCTL, rctl & ~E1000_RCTL_EN);
            adapter->flags |= FLAG_RESTART_NOW;
        }
        /* guard against interrupt when we're going down */
        if (!test_bit(__E1000_DOWN, &adapter->state))
            mod_timer(&adapter->watchdog_timer, jiffies + 1);
    }

    /* Reset on uncorrectable ECC error */
    if ((icr & E1000_ICR_ECCER) && (hw->mac.type >= e1000_pch_lpt)) {
        u32 pbeccsts = er32(PBECCSTS);

        adapter->corr_errors +=
            pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
        adapter->uncorr_errors +=
            (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
            E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);

        /* return immediately since reset is imminent */
        return IRQ_HANDLED;
    }

    if (napi_schedule_prep(&adapter->napi)) {
        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;
        adapter->total_rx_bytes = 0;
        adapter->total_rx_packets = 0;
        __napi_schedule(&adapter->napi);
    }

    return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
    struct net_device *netdev = data;
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 icr = er32(ICR);

    if (icr & adapter->eiac_mask)
        ew32(ICS, (icr & adapter->eiac_mask));

    if (icr & E1000_ICR_LSC) {
        hw->mac.get_link_status = true;
        /* guard against interrupt when we're going down */
        if (!test_bit(__E1000_DOWN, &adapter->state))
            mod_timer(&adapter->watchdog_timer, jiffies + 1);
    }

    if (!test_bit(__E1000_DOWN, &adapter->state))
        ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);

    return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
    struct net_device *netdev = data;
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    struct e1000_ring *tx_ring = adapter->tx_ring;

    adapter->total_tx_bytes = 0;
    adapter->total_tx_packets = 0;

    if (!e1000_clean_tx_irq(tx_ring))
        /* Ring was not completely cleaned, so fire another interrupt */
        ew32(ICS, tx_ring->ims_val);

    if (!test_bit(__E1000_DOWN, &adapter->state))
        ew32(IMS, adapter->tx_ring->ims_val);

    return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
    struct net_device *netdev = data;
    struct e1000_adapter *adapter = netdev_priv(netdev);
    struct e1000_ring *rx_ring = adapter->rx_ring;

    /* Write the ITR value calculated at the end of the
     * previous interrupt.
     */
    if (rx_ring->set_itr) {
        u32 itr = rx_ring->itr_val ?
              1000000000 / (rx_ring->itr_val * 256) : 0;

        writel(itr, rx_ring->itr_register);
        rx_ring->set_itr = 0;
    }

    if (napi_schedule_prep(&adapter->napi)) {
        adapter->total_rx_bytes = 0;
        adapter->total_rx_packets = 0;
        __napi_schedule(&adapter->napi);
    }
    return IRQ_HANDLED;
}
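
/* Illustrative note (hypothetical value, for illustration only): itr_val
 * is expressed in interrupts per second and, as implied by the formula
 * above, the interrupt throttle register counts in 256 ns units, so the
 * conversion is 10^9 / (itr_val * 256).  For an assumed itr_val of 20000
 * interrupts/s this writes roughly 195, i.e. about 50 us between
 * interrupts.
 */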
/**
 * e1000_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    struct e1000_ring *rx_ring = adapter->rx_ring;
    struct e1000_ring *tx_ring = adapter->tx_ring;
    int vector = 0;
    u32 ctrl_ext, ivar = 0;

    adapter->eiac_mask = 0;

    /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
    if (hw->mac.type == e1000_82574) {
        u32 rfctl = er32(RFCTL);

        rfctl |= E1000_RFCTL_ACK_DIS;
        ew32(RFCTL, rfctl);
    }

    /* Configure Rx vector */
    rx_ring->ims_val = E1000_IMS_RXQ0;
    adapter->eiac_mask |= rx_ring->ims_val;
    if (rx_ring->itr_val)
        writel(1000000000 / (rx_ring->itr_val * 256),
               rx_ring->itr_register);
    else
        writel(1, rx_ring->itr_register);
    ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

    /* Configure Tx vector */
    tx_ring->ims_val = E1000_IMS_TXQ0;
    vector++;
    if (tx_ring->itr_val)
        writel(1000000000 / (tx_ring->itr_val * 256),
               tx_ring->itr_register);
    else
        writel(1, tx_ring->itr_register);
    adapter->eiac_mask |= tx_ring->ims_val;
    ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

    /* set vector for Other Causes, e.g. link changes */
    vector++;
    ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
    if (rx_ring->itr_val)
        writel(1000000000 / (rx_ring->itr_val * 256),
               hw->hw_addr + E1000_EITR_82574(vector));
    else
        writel(1, hw->hw_addr + E1000_EITR_82574(vector));

    /* Cause Tx interrupts on every write back */
    ivar |= BIT(31);

    ew32(IVAR, ivar);

    /* enable MSI-X PBA support */
    ctrl_ext = er32(CTRL_EXT) & ~E1000_CTRL_EXT_IAME;
    ctrl_ext |= E1000_CTRL_EXT_PBA_CLR | E1000_CTRL_EXT_EIAME;
    ew32(CTRL_EXT, ctrl_ext);
    e1e_flush();
}
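
/* Illustrative note (layout inferred from the shifts above, values
 * assumed): each interrupt cause gets an 8-bit slot in IVAR, so with
 * vectors 0, 1 and 2 used for Rx, Tx and Other the register ends up
 * holding (VALID | 0) | ((VALID | 1) << 8) | ((VALID | 2) << 16), plus
 * BIT(31) to request a Tx interrupt on every descriptor write-back.
 */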
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
    if (adapter->msix_entries) {
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
    } else if (adapter->flags & FLAG_MSI_ENABLED) {
        pci_disable_msi(adapter->pdev);
        adapter->flags &= ~FLAG_MSI_ENABLED;
    }
}
/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;
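
	/* The switch below is an interrupt-mode fallback ladder: when MSI-X
	 * cannot be enabled the MSI-X case falls through to MSI, and when
	 * MSI fails it falls through again to legacy INTx.
	 */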
	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				struct e1000_adapter *a = adapter;

				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix_range(a->pdev,
							    a->msix_entries,
							    a->num_vectors,
							    a->num_vectors);
				if (err > 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		fallthrough;
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
		}
		fallthrough;
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}
/**
 * e1000_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;
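
	/* Vector 0 services Rx, vector 1 Tx and vector 2 the "other causes"
	 * interrupt; this ordering must match the IVAR programming done in
	 * e1000_configure_msix().
	 */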
	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%.14s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->rx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%.14s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		return err;

	e1000_configure_msix(adapter);

	return 0;
}
/**
 * e1000_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;

		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
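
	/* In MSI-X mode, EIAC_82574 marks which interrupt causes the hardware
	 * acknowledges automatically when their vector fires; the per-vector
	 * handlers then only re-arm their IMS bits on exit (see
	 * e1000_intr_msix_rx/tx above).
	 */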
	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
		     IMS_OTHER_MASK);
	} else if (hw->mac.type >= e1000_pch_lpt) {
		ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}
/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

/**
 * e1000e_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 **/
void e1000e_release_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware take over control of h/w */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}
/**
 * e1000_alloc_ring_dma - allocate memory for a ring structure
 * @adapter: board private structure
 * @ring: ring struct for which to allocate dma
 **/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
				struct e1000_ring *ring)
{
	struct pci_dev *pdev = adapter->pdev;
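
	/* ring->size must already hold the descriptor area length; the
	 * callers round it up to a 4K multiple before getting here.
	 */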
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
					GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}
/**
 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: Tx descriptor ring
 *
 * Return 0 on success, negative on failure
 **/
int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	int err = -ENOMEM, size;

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	tx_ring->buffer_info = vzalloc(size);
	if (!tx_ring->buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, tx_ring);
	if (err)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;
err:
	vfree(tx_ring->buffer_info);
	e_err("Unable to allocate memory for the transmit descriptor ring\n");
	return err;
}
/**
 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring
 *
 * Returns 0 on success, negative on failure
 **/
int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vzalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
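
	/* Each Rx buffer_info carries an array of PS_PAGE_BUFFERS page
	 * descriptors used only by the packet-split receive path; they are
	 * allocated up front so the receive hot path does not have to.
	 */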
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
		if (!buffer_info->ps_pages)
			goto err_pages;
	}

	desc_len = sizeof(union e1000_rx_desc_packet_split);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
		goto err_pages;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;

	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
	vfree(rx_ring->buffer_info);
	e_err("Unable to allocate memory for the receive descriptor ring\n");
	return err;
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @tx_ring: Tx descriptor ring
 **/
static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_put_txbuf(tx_ring, buffer_info, false);
	}

	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * e1000e_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring
 *
 * Free all transmit software resources
 **/
void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * e1000e_free_rx_resources - Free Rx Resources
 * @rx_ring: Rx descriptor ring
 *
 * Free all receive software resources
 **/
void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	e1000_clean_rx_ring(rx_ring);

	for (i = 0; i < rx_ring->count; i++)
		kfree(rx_ring->buffer_info[i].ps_pages);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
	rx_ring->desc = NULL;
}
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput. This functionality is controlled
 * by the InterruptThrottleRate module parameter.
 **/
static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
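
	/* The value returned is a latency class (lowest_latency, low_latency
	 * or bulk_latency), not an interrupt rate; e1000_set_itr() maps the
	 * class to an interrupts/second target afterwards.
	 */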
	if (packets == 0)
		return itr_setting;

	switch (itr_setting) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes / packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:	/* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes / packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes / packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes / packets > 2000) {
			retval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			retval = lowest_latency;
		}
		break;
	case bulk_latency:	/* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

	return retval;
}
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	u16 current_itr;
	u32 new_itr = adapter->itr;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	if (adapter->flags2 & FLAG2_DISABLE_AIM) {
		new_itr = 0;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* counts and packets in update_itr are dependent on these numbers */
	switch (current_itr) {
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000;	/* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
		    min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
		adapter->itr = new_itr;
		adapter->rx_ring->itr_val = new_itr;
		if (adapter->msix_entries)
			adapter->rx_ring->set_itr = 1;
		else
			e1000e_write_itr(adapter, new_itr);
	}
}
/**
 * e1000e_write_itr - write the ITR value to the appropriate registers
 * @adapter: address of board private structure
 * @itr: new ITR value to program
 *
 * e1000e_write_itr determines if the adapter is in MSI-X mode
 * and, if so, writes the EITR registers with the ITR value.
 * Otherwise, it writes the ITR value into the ITR register.
 **/
void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
{
	struct e1000_hw *hw = &adapter->hw;
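	/* itr is an interrupts/second target; the EITR/ITR interval field
	 * counts in 256 ns units, hence interval = 10^9 / (itr * 256).
	 * An itr of 0 writes 0, which disables moderation entirely.
	 */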
	u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;

	if (adapter->msix_entries) {
		int vector;

		for (vector = 0; vector < adapter->num_vectors; vector++)
			writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
	} else {
		ew32(ITR, new_itr);
	}
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 **/
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
	int size = sizeof(struct e1000_ring);

	adapter->tx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;
	adapter->tx_ring->count = adapter->tx_ring_count;
	adapter->tx_ring->adapter = adapter;

	adapter->rx_ring = kzalloc(size, GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;
	adapter->rx_ring->count = adapter->rx_ring_count;
	adapter->rx_ring->adapter = adapter;

	return 0;
err:
	e_err("Unable to allocate memory for queues\n");
	kfree(adapter->rx_ring);
	kfree(adapter->tx_ring);
	return -ENOMEM;
}
/**
 * e1000e_poll - NAPI Rx polling callback
 * @napi: struct associated with this polling callback
 * @budget: number of packets driver is allowed to process this poll
 **/
static int e1000e_poll(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
						     napi);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *poll_dev = adapter->netdev;
	int tx_cleaned = 1, work_done = 0;

	adapter = netdev_priv(poll_dev);

	if (!adapter->msix_entries ||
	    (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
		tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);

	adapter->clean_rx(adapter->rx_ring, &work_done, budget);
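
	/* Returning the full budget tells the NAPI core there is more work
	 * pending; it will poll again without re-enabling interrupts.
	 */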
	if (!tx_cleaned || work_done == budget)
		return budget;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done))) {
		if (adapter->itr_setting & 3)
			e1000_set_itr(adapter);
		if (!test_bit(__E1000_DOWN, &adapter->state)) {
			if (adapter->msix_entries)
				ew32(IMS, adapter->rx_ring->ims_val);
			else
				e1000_irq_enable(adapter);
		}
	}

	return work_done;
}
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* don't update vlan cookie if already programmed */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	/* add VID to filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta |= BIT((vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    (vid == adapter->mng_vlan_id)) {
		/* release control to f/w */
		e1000e_release_hw_control(adapter);
		return 0;
	}

	/* remove VID from filter table */
	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		index = (vid >> 5) & 0x7F;
		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
		vfta &= ~BIT((vid & 0x1F));
		hw->mac.ops.write_vfta(hw, index, vfta);
	}

	clear_bit(vid, adapter->active_vlans);

	return 0;
}
/**
 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
		ew32(RCTL, rctl);

		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       adapter->mng_vlan_id);
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
	}
}

/**
 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		ew32(RCTL, rctl);
	}
}

/**
 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* disable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl &= ~E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}

/**
 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
 * @adapter: board private structure to initialize
 **/
static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	/* enable VLAN tag insert/strip */
	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_VME;
	ew32(CTRL, ctrl);
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		adapter->mng_vlan_id = vid;
	}

	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
2855 static void e1000_init_manageability_pt(struct e1000_adapter
*adapter
)
2857 struct e1000_hw
*hw
= &adapter
->hw
;
2858 u32 manc
, manc2h
, mdef
, i
, j
;
2860 if (!(adapter
->flags
& FLAG_MNG_PT_ENABLED
))
2865 /* enable receiving management packets to the host. this will probably
2866 * generate destination unreachable messages from the host OS, but
2867 * the packets will be handled on SMBUS
2869 manc
|= E1000_MANC_EN_MNG2HOST
;
2870 manc2h
= er32(MANC2H
);
2872 switch (hw
->mac
.type
) {
2874 manc2h
|= (E1000_MANC2H_PORT_623
| E1000_MANC2H_PORT_664
);
2878 /* Check if IPMI pass-through decision filter already exists;
2881 for (i
= 0, j
= 0; i
< 8; i
++) {
2882 mdef
= er32(MDEF(i
));
2884 /* Ignore filters with anything other than IPMI ports */
2885 if (mdef
& ~(E1000_MDEF_PORT_623
| E1000_MDEF_PORT_664
))
2888 /* Enable this decision filter in MANC2H */
2895 if (j
== (E1000_MDEF_PORT_623
| E1000_MDEF_PORT_664
))
2898 /* Create new decision filter in an empty filter */
2899 for (i
= 0, j
= 0; i
< 8; i
++)
2900 if (er32(MDEF(i
)) == 0) {
2901 ew32(MDEF(i
), (E1000_MDEF_PORT_623
|
2902 E1000_MDEF_PORT_664
));
2909 e_warn("Unable to create IPMI pass-through filter\n");
2913 ew32(MANC2H
, manc2h
);
2918 * e1000_configure_tx - Configure Transmit Unit after Reset
2919 * @adapter: board private structure
2921 * Configure the Tx unit of the MAC after a reset.
2923 static void e1000_configure_tx(struct e1000_adapter
*adapter
)
2925 struct e1000_hw
*hw
= &adapter
->hw
;
2926 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
2928 u32 tdlen
, tctl
, tarc
;
2930 /* Setup the HW Tx Head and Tail descriptor pointers */
2931 tdba
= tx_ring
->dma
;
2932 tdlen
= tx_ring
->count
* sizeof(struct e1000_tx_desc
);
2933 ew32(TDBAL(0), (tdba
& DMA_BIT_MASK(32)));
2934 ew32(TDBAH(0), (tdba
>> 32));
2935 ew32(TDLEN(0), tdlen
);
2938 tx_ring
->head
= adapter
->hw
.hw_addr
+ E1000_TDH(0);
2939 tx_ring
->tail
= adapter
->hw
.hw_addr
+ E1000_TDT(0);
2941 writel(0, tx_ring
->head
);
2942 if (adapter
->flags2
& FLAG2_PCIM2PCI_ARBITER_WA
)
2943 e1000e_update_tdt_wa(tx_ring
, 0);
2945 writel(0, tx_ring
->tail
);
2947 /* Set the Tx Interrupt Delay register */
2948 ew32(TIDV
, adapter
->tx_int_delay
);
2949 /* Tx irq moderation */
2950 ew32(TADV
, adapter
->tx_abs_int_delay
);
2952 if (adapter
->flags2
& FLAG2_DMA_BURST
) {
2953 u32 txdctl
= er32(TXDCTL(0));
2955 txdctl
&= ~(E1000_TXDCTL_PTHRESH
| E1000_TXDCTL_HTHRESH
|
2956 E1000_TXDCTL_WTHRESH
);
2957 /* set up some performance related parameters to encourage the
2958 * hardware to use the bus more efficiently in bursts, depends
2959 * on the tx_int_delay to be enabled,
2960 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2961 * hthresh = 1 ==> prefetch when one or more available
2962 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2963 * BEWARE: this seems to work but should be considered first if
2964 * there are Tx hangs or other Tx related bugs
2966 txdctl
|= E1000_TXDCTL_DMA_BURST_ENABLE
;
2967 ew32(TXDCTL(0), txdctl
);
2969 /* erratum work around: set txdctl the same for both queues */
2970 ew32(TXDCTL(1), er32(TXDCTL(0)));
2972 /* Program the Transmit Control Register */
2974 tctl
&= ~E1000_TCTL_CT
;
2975 tctl
|= E1000_TCTL_PSP
| E1000_TCTL_RTLC
|
2976 (E1000_COLLISION_THRESHOLD
<< E1000_CT_SHIFT
);
2978 if (adapter
->flags
& FLAG_TARC_SPEED_MODE_BIT
) {
2979 tarc
= er32(TARC(0));
2980 /* set the speed mode bit, we'll clear it if we're not at
2981 * gigabit link later
2983 #define SPEED_MODE_BIT BIT(21)
2984 tarc
|= SPEED_MODE_BIT
;
2985 ew32(TARC(0), tarc
);
2988 /* errata: program both queues to unweighted RR */
2989 if (adapter
->flags
& FLAG_TARC_SET_BIT_ZERO
) {
2990 tarc
= er32(TARC(0));
2992 ew32(TARC(0), tarc
);
2993 tarc
= er32(TARC(1));
2995 ew32(TARC(1), tarc
);
2998 /* Setup Transmit Descriptor Settings for eop descriptor */
2999 adapter
->txd_cmd
= E1000_TXD_CMD_EOP
| E1000_TXD_CMD_IFCS
;
3001 /* only set IDE if we are delaying interrupts using the timers */
3002 if (adapter
->tx_int_delay
)
3003 adapter
->txd_cmd
|= E1000_TXD_CMD_IDE
;
3005 /* enable Report Status bit */
3006 adapter
->txd_cmd
|= E1000_TXD_CMD_RS
;
3010 hw
->mac
.ops
.config_collision_dist(hw
);
3012 /* SPT and KBL Si errata workaround to avoid data corruption */
3013 if (hw
->mac
.type
== e1000_pch_spt
) {
3016 reg_val
= er32(IOSFPC
);
3017 reg_val
|= E1000_RCTL_RDMTS_HEX
;
3018 ew32(IOSFPC
, reg_val
);
3020 reg_val
= er32(TARC(0));
3021 /* SPT and KBL Si errata workaround to avoid Tx hang.
3022 * Dropping the number of outstanding requests from
3023 * 3 to 2 in order to avoid a buffer overrun.
3025 reg_val
&= ~E1000_TARC0_CB_MULTIQ_3_REQ
;
3026 reg_val
|= E1000_TARC0_CB_MULTIQ_2_REQ
;
3027 ew32(TARC(0), reg_val
);
3031 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3032 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
3035 * e1000_setup_rctl - configure the receive control registers
3036 * @adapter: Board private structure
3038 static void e1000_setup_rctl(struct e1000_adapter
*adapter
)
3040 struct e1000_hw
*hw
= &adapter
->hw
;
3044 /* Workaround Si errata on PCHx - configure jumbo frame flow.
3045 * If jumbo frames not set, program related MAC/PHY registers
3048 if (hw
->mac
.type
>= e1000_pch2lan
) {
3051 if (adapter
->netdev
->mtu
> ETH_DATA_LEN
)
3052 ret_val
= e1000_lv_jumbo_workaround_ich8lan(hw
, true);
3054 ret_val
= e1000_lv_jumbo_workaround_ich8lan(hw
, false);
3057 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3060 /* Program MC offset vector base */
3062 rctl
&= ~(3 << E1000_RCTL_MO_SHIFT
);
3063 rctl
|= E1000_RCTL_EN
| E1000_RCTL_BAM
|
3064 E1000_RCTL_LBM_NO
| E1000_RCTL_RDMTS_HALF
|
3065 (adapter
->hw
.mac
.mc_filter_type
<< E1000_RCTL_MO_SHIFT
);
3067 /* Do not Store bad packets */
3068 rctl
&= ~E1000_RCTL_SBP
;
3070 /* Enable Long Packet receive */
3071 if (adapter
->netdev
->mtu
<= ETH_DATA_LEN
)
3072 rctl
&= ~E1000_RCTL_LPE
;
3074 rctl
|= E1000_RCTL_LPE
;
3076 /* Some systems expect that the CRC is included in SMBUS traffic. The
3077 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3078 * host memory when this is enabled
3080 if (adapter
->flags2
& FLAG2_CRC_STRIPPING
)
3081 rctl
|= E1000_RCTL_SECRC
;
3083 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3084 if ((hw
->phy
.type
== e1000_phy_82577
) && (rctl
& E1000_RCTL_LPE
)) {
3087 e1e_rphy(hw
, PHY_REG(770, 26), &phy_data
);
3090 e1e_wphy(hw
, PHY_REG(770, 26), phy_data
);
3092 e1e_rphy(hw
, 22, &phy_data
);
3094 phy_data
|= BIT(14);
3095 e1e_wphy(hw
, 0x10, 0x2823);
3096 e1e_wphy(hw
, 0x11, 0x0003);
3097 e1e_wphy(hw
, 22, phy_data
);
3100 /* Setup buffer sizes */
3101 rctl
&= ~E1000_RCTL_SZ_4096
;
3102 rctl
|= E1000_RCTL_BSEX
;
3103 switch (adapter
->rx_buffer_len
) {
3106 rctl
|= E1000_RCTL_SZ_2048
;
3107 rctl
&= ~E1000_RCTL_BSEX
;
3110 rctl
|= E1000_RCTL_SZ_4096
;
3113 rctl
|= E1000_RCTL_SZ_8192
;
3116 rctl
|= E1000_RCTL_SZ_16384
;
3120 /* Enable Extended Status in all Receive Descriptors */
3121 rfctl
= er32(RFCTL
);
3122 rfctl
|= E1000_RFCTL_EXTEN
;
3125 /* 82571 and greater support packet-split where the protocol
3126 * header is placed in skb->data and the packet data is
3127 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3128 * In the case of a non-split, skb->data is linearly filled,
3129 * followed by the page buffers. Therefore, skb->data is
3130 * sized to hold the largest protocol header.
3132 * allocations using alloc_page take too long for regular MTU
3133 * so only enable packet split for jumbo frames
3135 * Using pages when the page size is greater than 16k wastes
3136 * a lot of memory, since we allocate 3 pages at all times
3139 pages
= PAGE_USE_COUNT(adapter
->netdev
->mtu
);
3140 if ((pages
<= 3) && (PAGE_SIZE
<= 16384) && (rctl
& E1000_RCTL_LPE
))
3141 adapter
->rx_ps_pages
= pages
;
3143 adapter
->rx_ps_pages
= 0;
3145 if (adapter
->rx_ps_pages
) {
3148 /* Enable Packet split descriptors */
3149 rctl
|= E1000_RCTL_DTYP_PS
;
3151 psrctl
|= adapter
->rx_ps_bsize0
>> E1000_PSRCTL_BSIZE0_SHIFT
;
3153 switch (adapter
->rx_ps_pages
) {
3155 psrctl
|= PAGE_SIZE
<< E1000_PSRCTL_BSIZE3_SHIFT
;
3158 psrctl
|= PAGE_SIZE
<< E1000_PSRCTL_BSIZE2_SHIFT
;
3161 psrctl
|= PAGE_SIZE
>> E1000_PSRCTL_BSIZE1_SHIFT
;
3165 ew32(PSRCTL
, psrctl
);
3168 /* This is useful for sniffing bad packets. */
3169 if (adapter
->netdev
->features
& NETIF_F_RXALL
) {
3170 /* UPE and MPE will be handled by normal PROMISC logic
3171 * in e1000e_set_rx_mode
3173 rctl
|= (E1000_RCTL_SBP
| /* Receive bad packets */
3174 E1000_RCTL_BAM
| /* RX All Bcast Pkts */
3175 E1000_RCTL_PMCF
); /* RX All MAC Ctrl Pkts */
3177 rctl
&= ~(E1000_RCTL_VFE
| /* Disable VLAN filter */
3178 E1000_RCTL_DPF
| /* Allow filtered pause */
3179 E1000_RCTL_CFIEN
); /* Dis VLAN CFIEN Filter */
3180 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3181 * and that breaks VLANs.
3186 /* just started the receive unit, no need to restart */
3187 adapter
->flags
&= ~FLAG_RESTART_NOW
;
3191 * e1000_configure_rx - Configure Receive Unit after Reset
3192 * @adapter: board private structure
3194 * Configure the Rx unit of the MAC after a reset.
3196 static void e1000_configure_rx(struct e1000_adapter
*adapter
)
3198 struct e1000_hw
*hw
= &adapter
->hw
;
3199 struct e1000_ring
*rx_ring
= adapter
->rx_ring
;
3201 u32 rdlen
, rctl
, rxcsum
, ctrl_ext
;
3203 if (adapter
->rx_ps_pages
) {
3204 /* this is a 32 byte descriptor */
3205 rdlen
= rx_ring
->count
*
3206 sizeof(union e1000_rx_desc_packet_split
);
3207 adapter
->clean_rx
= e1000_clean_rx_irq_ps
;
3208 adapter
->alloc_rx_buf
= e1000_alloc_rx_buffers_ps
;
3209 } else if (adapter
->netdev
->mtu
> ETH_FRAME_LEN
+ ETH_FCS_LEN
) {
3210 rdlen
= rx_ring
->count
* sizeof(union e1000_rx_desc_extended
);
3211 adapter
->clean_rx
= e1000_clean_jumbo_rx_irq
;
3212 adapter
->alloc_rx_buf
= e1000_alloc_jumbo_rx_buffers
;
3214 rdlen
= rx_ring
->count
* sizeof(union e1000_rx_desc_extended
);
3215 adapter
->clean_rx
= e1000_clean_rx_irq
;
3216 adapter
->alloc_rx_buf
= e1000_alloc_rx_buffers
;
3219 /* disable receives while setting up the descriptors */
3221 if (!(adapter
->flags2
& FLAG2_NO_DISABLE_RX
))
3222 ew32(RCTL
, rctl
& ~E1000_RCTL_EN
);
3224 usleep_range(10000, 11000);
3226 if (adapter
->flags2
& FLAG2_DMA_BURST
) {
3227 /* set the writeback threshold (only takes effect if the RDTR
3228 * is set). set GRAN=1 and write back up to 0x4 worth, and
3229 * enable prefetching of 0x20 Rx descriptors
3235 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE
);
3236 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE
);
3239 /* set the Receive Delay Timer Register */
3240 ew32(RDTR
, adapter
->rx_int_delay
);
3242 /* irq moderation */
3243 ew32(RADV
, adapter
->rx_abs_int_delay
);
3244 if ((adapter
->itr_setting
!= 0) && (adapter
->itr
!= 0))
3245 e1000e_write_itr(adapter
, adapter
->itr
);
3247 ctrl_ext
= er32(CTRL_EXT
);
3248 /* Auto-Mask interrupts upon ICR access */
3249 ctrl_ext
|= E1000_CTRL_EXT_IAME
;
3250 ew32(IAM
, 0xffffffff);
3251 ew32(CTRL_EXT
, ctrl_ext
);
3254 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3255 * the Base and Length of the Rx Descriptor Ring
3257 rdba
= rx_ring
->dma
;
3258 ew32(RDBAL(0), (rdba
& DMA_BIT_MASK(32)));
3259 ew32(RDBAH(0), (rdba
>> 32));
3260 ew32(RDLEN(0), rdlen
);
3263 rx_ring
->head
= adapter
->hw
.hw_addr
+ E1000_RDH(0);
3264 rx_ring
->tail
= adapter
->hw
.hw_addr
+ E1000_RDT(0);
3266 writel(0, rx_ring
->head
);
3267 if (adapter
->flags2
& FLAG2_PCIM2PCI_ARBITER_WA
)
3268 e1000e_update_rdt_wa(rx_ring
, 0);
3270 writel(0, rx_ring
->tail
);
3272 /* Enable Receive Checksum Offload for TCP and UDP */
3273 rxcsum
= er32(RXCSUM
);
3274 if (adapter
->netdev
->features
& NETIF_F_RXCSUM
)
3275 rxcsum
|= E1000_RXCSUM_TUOFL
;
3277 rxcsum
&= ~E1000_RXCSUM_TUOFL
;
3278 ew32(RXCSUM
, rxcsum
);
3280 /* With jumbo frames, excessive C-state transition latencies result
3281 * in dropped transactions.
3283 if (adapter
->netdev
->mtu
> ETH_DATA_LEN
) {
3285 ((er32(PBA
) & E1000_PBA_RXA_MASK
) * 1024 -
3286 adapter
->max_frame_size
) * 8 / 1000;
3288 if (adapter
->flags
& FLAG_IS_ICH
) {
3289 u32 rxdctl
= er32(RXDCTL(0));
3291 ew32(RXDCTL(0), rxdctl
| 0x3 | BIT(8));
3294 dev_info(&adapter
->pdev
->dev
,
3295 "Some CPU C-states have been disabled in order to enable jumbo frames\n");
3296 cpu_latency_qos_update_request(&adapter
->pm_qos_req
, lat
);
3298 cpu_latency_qos_update_request(&adapter
->pm_qos_req
,
3299 PM_QOS_DEFAULT_VALUE
);
3302 /* Enable Receives */
3307 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3308 * @netdev: network interface device structure
3310 * Writes multicast address list to the MTA hash table.
3311 * Returns: -ENOMEM on failure
3312 * 0 on no addresses written
3313 * X on writing X addresses to MTA
3315 static int e1000e_write_mc_addr_list(struct net_device
*netdev
)
3317 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3318 struct e1000_hw
*hw
= &adapter
->hw
;
3319 struct netdev_hw_addr
*ha
;
3323 if (netdev_mc_empty(netdev
)) {
3324 /* nothing to program, so clear mc list */
3325 hw
->mac
.ops
.update_mc_addr_list(hw
, NULL
, 0);
3329 mta_list
= kcalloc(netdev_mc_count(netdev
), ETH_ALEN
, GFP_ATOMIC
);
3333 /* update_mc_addr_list expects a packed array of only addresses. */
3335 netdev_for_each_mc_addr(ha
, netdev
)
3336 memcpy(mta_list
+ (i
++ * ETH_ALEN
), ha
->addr
, ETH_ALEN
);
3338 hw
->mac
.ops
.update_mc_addr_list(hw
, mta_list
, i
);
3341 return netdev_mc_count(netdev
);
3345 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3346 * @netdev: network interface device structure
3348 * Writes unicast address list to the RAR table.
3349 * Returns: -ENOMEM on failure/insufficient address space
3350 * 0 on no addresses written
3351 * X on writing X addresses to the RAR table
3353 static int e1000e_write_uc_addr_list(struct net_device
*netdev
)
3355 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3356 struct e1000_hw
*hw
= &adapter
->hw
;
3357 unsigned int rar_entries
;
3360 rar_entries
= hw
->mac
.ops
.rar_get_count(hw
);
3362 /* save a rar entry for our hardware address */
3365 /* save a rar entry for the LAA workaround */
3366 if (adapter
->flags
& FLAG_RESET_OVERWRITES_LAA
)
3369 /* return ENOMEM indicating insufficient memory for addresses */
3370 if (netdev_uc_count(netdev
) > rar_entries
)
3373 if (!netdev_uc_empty(netdev
) && rar_entries
) {
3374 struct netdev_hw_addr
*ha
;
3376 /* write the addresses in reverse order to avoid write
3379 netdev_for_each_uc_addr(ha
, netdev
) {
3384 ret_val
= hw
->mac
.ops
.rar_set(hw
, ha
->addr
, rar_entries
--);
3391 /* zero out the remaining RAR entries not used above */
3392 for (; rar_entries
> 0; rar_entries
--) {
3393 ew32(RAH(rar_entries
), 0);
3394 ew32(RAL(rar_entries
), 0);
3402 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3403 * @netdev: network interface device structure
3405 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3406 * address list or the network interface flags are updated. This routine is
3407 * responsible for configuring the hardware for proper unicast, multicast,
3408 * promiscuous mode, and all-multi behavior.
3410 static void e1000e_set_rx_mode(struct net_device
*netdev
)
3412 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
3413 struct e1000_hw
*hw
= &adapter
->hw
;
3416 if (pm_runtime_suspended(netdev
->dev
.parent
))
3419 /* Check for Promiscuous and All Multicast modes */
3422 /* clear the affected bits */
3423 rctl
&= ~(E1000_RCTL_UPE
| E1000_RCTL_MPE
);
3425 if (netdev
->flags
& IFF_PROMISC
) {
3426 rctl
|= (E1000_RCTL_UPE
| E1000_RCTL_MPE
);
3427 /* Do not hardware filter VLANs in promisc mode */
3428 e1000e_vlan_filter_disable(adapter
);
3432 if (netdev
->flags
& IFF_ALLMULTI
) {
3433 rctl
|= E1000_RCTL_MPE
;
3435 /* Write addresses to the MTA, if the attempt fails
3436 * then we should just turn on promiscuous mode so
3437 * that we can at least receive multicast traffic
3439 count
= e1000e_write_mc_addr_list(netdev
);
3441 rctl
|= E1000_RCTL_MPE
;
3443 e1000e_vlan_filter_enable(adapter
);
3444 /* Write addresses to available RAR registers, if there is not
3445 * sufficient space to store all the addresses then enable
3446 * unicast promiscuous mode
3448 count
= e1000e_write_uc_addr_list(netdev
);
3450 rctl
|= E1000_RCTL_UPE
;
3455 if (netdev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)
3456 e1000e_vlan_strip_enable(adapter
);
3458 e1000e_vlan_strip_disable(adapter
);
3461 static void e1000e_setup_rss_hash(struct e1000_adapter
*adapter
)
3463 struct e1000_hw
*hw
= &adapter
->hw
;
3468 netdev_rss_key_fill(rss_key
, sizeof(rss_key
));
3469 for (i
= 0; i
< 10; i
++)
3470 ew32(RSSRK(i
), rss_key
[i
]);
3472 /* Direct all traffic to queue 0 */
3473 for (i
= 0; i
< 32; i
++)
3476 /* Disable raw packet checksumming so that RSS hash is placed in
3477 * descriptor on writeback.
3479 rxcsum
= er32(RXCSUM
);
3480 rxcsum
|= E1000_RXCSUM_PCSD
;
3482 ew32(RXCSUM
, rxcsum
);
3484 mrqc
= (E1000_MRQC_RSS_FIELD_IPV4
|
3485 E1000_MRQC_RSS_FIELD_IPV4_TCP
|
3486 E1000_MRQC_RSS_FIELD_IPV6
|
3487 E1000_MRQC_RSS_FIELD_IPV6_TCP
|
3488 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX
);
3494 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3495 * @adapter: board private structure
3496 * @timinca: pointer to returned time increment attributes
3498 * Get attributes for incrementing the System Time Register SYSTIML/H at
3499 * the default base frequency, and set the cyclecounter shift value.
3501 s32
e1000e_get_base_timinca(struct e1000_adapter
*adapter
, u32
*timinca
)
3503 struct e1000_hw
*hw
= &adapter
->hw
;
3504 u32 incvalue
, incperiod
, shift
;
3506 /* Make sure clock is enabled on I217/I218/I219 before checking
3509 if ((hw
->mac
.type
>= e1000_pch_lpt
) &&
3510 !(er32(TSYNCTXCTL
) & E1000_TSYNCTXCTL_ENABLED
) &&
3511 !(er32(TSYNCRXCTL
) & E1000_TSYNCRXCTL_ENABLED
)) {
3512 u32 fextnvm7
= er32(FEXTNVM7
);
3514 if (!(fextnvm7
& BIT(0))) {
3515 ew32(FEXTNVM7
, fextnvm7
| BIT(0));
3520 switch (hw
->mac
.type
) {
3522 /* Stable 96MHz frequency */
3523 incperiod
= INCPERIOD_96MHZ
;
3524 incvalue
= INCVALUE_96MHZ
;
3525 shift
= INCVALUE_SHIFT_96MHZ
;
3526 adapter
->cc
.shift
= shift
+ INCPERIOD_SHIFT_96MHZ
;
3529 if (er32(TSYNCRXCTL
) & E1000_TSYNCRXCTL_SYSCFI
) {
3530 /* Stable 96MHz frequency */
3531 incperiod
= INCPERIOD_96MHZ
;
3532 incvalue
= INCVALUE_96MHZ
;
3533 shift
= INCVALUE_SHIFT_96MHZ
;
3534 adapter
->cc
.shift
= shift
+ INCPERIOD_SHIFT_96MHZ
;
3536 /* Stable 25MHz frequency */
3537 incperiod
= INCPERIOD_25MHZ
;
3538 incvalue
= INCVALUE_25MHZ
;
3539 shift
= INCVALUE_SHIFT_25MHZ
;
3540 adapter
->cc
.shift
= shift
;
3544 /* Stable 24MHz frequency */
3545 incperiod
= INCPERIOD_24MHZ
;
3546 incvalue
= INCVALUE_24MHZ
;
3547 shift
= INCVALUE_SHIFT_24MHZ
;
3548 adapter
->cc
.shift
= shift
;
3555 if (er32(TSYNCRXCTL
) & E1000_TSYNCRXCTL_SYSCFI
) {
3556 /* Stable 24MHz frequency */
3557 incperiod
= INCPERIOD_24MHZ
;
3558 incvalue
= INCVALUE_24MHZ
;
3559 shift
= INCVALUE_SHIFT_24MHZ
;
3560 adapter
->cc
.shift
= shift
;
3562 /* Stable 38400KHz frequency */
3563 incperiod
= INCPERIOD_38400KHZ
;
3564 incvalue
= INCVALUE_38400KHZ
;
3565 shift
= INCVALUE_SHIFT_38400KHZ
;
3566 adapter
->cc
.shift
= shift
;
3571 /* Stable 25MHz frequency */
3572 incperiod
= INCPERIOD_25MHZ
;
3573 incvalue
= INCVALUE_25MHZ
;
3574 shift
= INCVALUE_SHIFT_25MHZ
;
3575 adapter
->cc
.shift
= shift
;
3581 *timinca
= ((incperiod
<< E1000_TIMINCA_INCPERIOD_SHIFT
) |
3582 ((incvalue
<< shift
) & E1000_TIMINCA_INCVALUE_MASK
));
3588 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3589 * @adapter: board private structure
3590 * @config: timestamp configuration
3592 * Outgoing time stamping can be enabled and disabled. Play nice and
3593 * disable it when requested, although it shouldn't cause any overhead
3594 * when no packet needs it. At most one packet in the queue may be
3595 * marked for time stamping, otherwise it would be impossible to tell
3596 * for sure to which packet the hardware time stamp belongs.
3598 * Incoming time stamping has to be configured via the hardware filters.
3599 * Not all combinations are supported, in particular event type has to be
3600 * specified. Matching the kind of event packet is not supported, with the
3601 * exception of "all V2 events regardless of level 2 or 4".
3603 static int e1000e_config_hwtstamp(struct e1000_adapter
*adapter
,
3604 struct hwtstamp_config
*config
)
3606 struct e1000_hw
*hw
= &adapter
->hw
;
3607 u32 tsync_tx_ctl
= E1000_TSYNCTXCTL_ENABLED
;
3608 u32 tsync_rx_ctl
= E1000_TSYNCRXCTL_ENABLED
;
3615 if (!(adapter
->flags
& FLAG_HAS_HW_TIMESTAMP
))
3618 /* flags reserved for future extensions - must be zero */
3622 switch (config
->tx_type
) {
3623 case HWTSTAMP_TX_OFF
:
3626 case HWTSTAMP_TX_ON
:
3632 switch (config
->rx_filter
) {
3633 case HWTSTAMP_FILTER_NONE
:
3636 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
3637 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L4_V1
;
3638 rxmtrl
= E1000_RXMTRL_PTP_V1_SYNC_MESSAGE
;
3641 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
3642 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L4_V1
;
3643 rxmtrl
= E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE
;
3646 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
3647 /* Also time stamps V2 L2 Path Delay Request/Response */
3648 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L2_V2
;
3649 rxmtrl
= E1000_RXMTRL_PTP_V2_SYNC_MESSAGE
;
3652 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
3653 /* Also time stamps V2 L2 Path Delay Request/Response. */
3654 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L2_V2
;
3655 rxmtrl
= E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE
;
3658 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
3659 /* Hardware cannot filter just V2 L4 Sync messages */
3661 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
3662 /* Also time stamps V2 Path Delay Request/Response. */
3663 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L2_L4_V2
;
3664 rxmtrl
= E1000_RXMTRL_PTP_V2_SYNC_MESSAGE
;
3668 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
3669 /* Hardware cannot filter just V2 L4 Delay Request messages */
3671 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
3672 /* Also time stamps V2 Path Delay Request/Response. */
3673 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_L2_L4_V2
;
3674 rxmtrl
= E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE
;
3678 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
3679 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
3680 /* Hardware cannot filter just V2 L4 or L2 Event messages */
3682 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
3683 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_EVENT_V2
;
3684 config
->rx_filter
= HWTSTAMP_FILTER_PTP_V2_EVENT
;
3688 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
3689 /* For V1, the hardware can only filter Sync messages or
3690 * Delay Request messages but not both so fall-through to
3691 * time stamp all packets.
3694 case HWTSTAMP_FILTER_NTP_ALL
:
3695 case HWTSTAMP_FILTER_ALL
:
3698 tsync_rx_ctl
|= E1000_TSYNCRXCTL_TYPE_ALL
;
3699 config
->rx_filter
= HWTSTAMP_FILTER_ALL
;
3705 adapter
->hwtstamp_config
= *config
;
3707 /* enable/disable Tx h/w time stamping */
3708 regval
= er32(TSYNCTXCTL
);
3709 regval
&= ~E1000_TSYNCTXCTL_ENABLED
;
3710 regval
|= tsync_tx_ctl
;
3711 ew32(TSYNCTXCTL
, regval
);
3712 if ((er32(TSYNCTXCTL
) & E1000_TSYNCTXCTL_ENABLED
) !=
3713 (regval
& E1000_TSYNCTXCTL_ENABLED
)) {
3714 e_err("Timesync Tx Control register not set as expected\n");
3718 /* enable/disable Rx h/w time stamping */
3719 regval
= er32(TSYNCRXCTL
);
3720 regval
&= ~(E1000_TSYNCRXCTL_ENABLED
| E1000_TSYNCRXCTL_TYPE_MASK
);
3721 regval
|= tsync_rx_ctl
;
3722 ew32(TSYNCRXCTL
, regval
);
3723 if ((er32(TSYNCRXCTL
) & (E1000_TSYNCRXCTL_ENABLED
|
3724 E1000_TSYNCRXCTL_TYPE_MASK
)) !=
3725 (regval
& (E1000_TSYNCRXCTL_ENABLED
|
3726 E1000_TSYNCRXCTL_TYPE_MASK
))) {
3727 e_err("Timesync Rx Control register not set as expected\n");
3731 /* L2: define ethertype filter for time stamped packets */
3733 rxmtrl
|= ETH_P_1588
;
3735 /* define which PTP packets get time stamped */
3736 ew32(RXMTRL
, rxmtrl
);
3738 /* Filter by destination port */
3740 rxudp
= PTP_EV_PORT
;
3741 cpu_to_be16s(&rxudp
);
3747 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
/**
 * e1000_configure - configure the hardware for Rx and Tx
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;

	e1000e_set_rx_mode(adapter->netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability_pt(adapter);

	e1000_configure_tx(adapter);

	if (adapter->netdev->features & NETIF_F_RXHASH)
		e1000e_setup_rss_hash(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
}
/**
 * e1000e_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000e_reset ***
 **/
void e1000e_power_up_phy(struct e1000_adapter *adapter)
{
	if (adapter->hw.phy.ops.power_up)
		adapter->hw.phy.ops.power_up(&adapter->hw);

	adapter->hw.mac.ops.setup_link(&adapter->hw);
}

/**
 * e1000_power_down_phy - Power down the PHY
 * @adapter: board private structure
 *
 * Power down the PHY so no link is implied when interface is down.
 * The PHY cannot be powered down if management or WoL is active.
 **/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	if (adapter->hw.phy.ops.power_down)
		adapter->hw.phy.ops.power_down(&adapter->hw);
}
/**
 * e1000_flush_tx_ring - remove all descriptors from the tx_ring
 * @adapter: board private structure
 *
 * We want to clear all pending descriptors from the TX ring.
 * zeroing happens when the HW reads the regs. We assign the ring itself as
 * the data of the next descriptor. We don't care about the data we are about
 * to reset the HW.
 */
static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
	u16 size = 512;

	tctl = er32(TCTL);
	ew32(TCTL, tctl | E1000_TCTL_EN);
	tdt = er32(TDT(0));
	BUG_ON(tdt != tx_ring->next_to_use);
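	/* The dummy descriptor reuses the ring's own DMA address as a
	 * harmless buffer; its contents do not matter, the write simply
	 * gives the hardware one more descriptor to consume so the queue
	 * drains before the reset.
	 */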
	tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
	tx_desc->buffer_addr = cpu_to_le64(tx_ring->dma);

	tx_desc->lower.data = cpu_to_le32(txd_lower | size);
	tx_desc->upper.data = 0;
	/* flush descriptors to memory before notifying the HW */
	wmb();
	tx_ring->next_to_use++;
	if (tx_ring->next_to_use == tx_ring->count)
		tx_ring->next_to_use = 0;
	ew32(TDT(0), tx_ring->next_to_use);
	usleep_range(200, 250);
}
/**
 * e1000_flush_rx_ring - remove all descriptors from the rx_ring
 * @adapter: board private structure
 *
 * Mark all descriptors in the RX ring as consumed and disable the rx ring
 */
static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
{
	u32 rctl, rxdctl;
	struct e1000_hw *hw = &adapter->hw;

	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	e1e_flush();
	usleep_range(100, 150);

	rxdctl = er32(RXDCTL(0));
	/* zero the lower 14 bits (prefetch and host thresholds) */
	rxdctl &= 0xffffc000;

	/* update thresholds: prefetch threshold to 31, host threshold to 1
	 * and make sure the granularity is "descriptors" and not "cache lines"
	 */
	rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);

	ew32(RXDCTL(0), rxdctl);
	/* momentarily enable the RX ring for the changes to take effect */
	ew32(RCTL, rctl | E1000_RCTL_EN);
	e1e_flush();
	usleep_range(100, 150);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
}
/**
 * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
 * @adapter: board private structure
 *
 * In i219, the descriptor rings must be emptied before resetting the HW
 * or before changing the device state to D3 during runtime (runtime PM).
 *
 * Failure to do this will cause the HW to enter a unit hang state which can
 * only be released by PCI reset on the device
 */
static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
{
	u16 hang_state;
	u32 fext_nvm11, tdlen;
	struct e1000_hw *hw = &adapter->hw;

	/* First, disable MULR fix in FEXTNVM11 */
	fext_nvm11 = er32(FEXTNVM11);
	fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
	ew32(FEXTNVM11, fext_nvm11);
	/* do nothing if we're not in faulty state, or if the queue is empty */
	tdlen = er32(TDLEN(0));
	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
			     &hang_state);
	if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
		return;
	e1000_flush_tx_ring(adapter);
	/* recheck, maybe the fault is caused by the rx ring */
	pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
			     &hang_state);
	if (hang_state & FLUSH_DESC_REQUIRED)
		e1000_flush_rx_ring(adapter);
}
/**
 * e1000e_systim_reset - reset the timesync registers after a hardware reset
 * @adapter: board private structure
 *
 * When the MAC is reset, all hardware bits for timesync will be reset to the
 * default values. This function will restore the settings last in place.
 * Since the clock SYSTIME registers are reset, we will simply restore the
 * cyclecounter to the kernel real clock time.
 **/
static void e1000e_systim_reset(struct e1000_adapter *adapter)
{
	struct ptp_clock_info *info = &adapter->ptp_clock_info;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	u32 timinca;
	s32 ret_val;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
		return;

	if (info->adjfreq) {
		/* restore the previous ptp frequency delta */
		ret_val = info->adjfreq(info, adapter->ptp_delta);
	} else {
		/* set the default base frequency if no adjustment possible */
		ret_val = e1000e_get_base_timinca(adapter, &timinca);
		if (!ret_val)
			ew32(TIMINCA, timinca);
	}

	if (ret_val) {
		dev_warn(&adapter->pdev->dev,
			 "Failed to restore TIMINCA clock rate delta: %d\n",
			 ret_val);
		return;
	}

	/* reset the systim ns time counter */
	spin_lock_irqsave(&adapter->systim_lock, flags);
	timecounter_init(&adapter->tc, &adapter->cc,
			 ktime_to_ns(ktime_get_real()));
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	/* restore the previous hwtstamp configuration settings */
	e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
}
3957 * e1000e_reset - bring the hardware into a known good state
3958 * @adapter: board private structure
3960 * This function boots the hardware and enables some settings that
3961 * require a configuration cycle of the hardware - those cannot be
3962 * set/changed during runtime. After reset the device needs to be
3963 * properly configured for Rx, Tx etc.
3965 void e1000e_reset(struct e1000_adapter
*adapter
)
3967 struct e1000_mac_info
*mac
= &adapter
->hw
.mac
;
3968 struct e1000_fc_info
*fc
= &adapter
->hw
.fc
;
3969 struct e1000_hw
*hw
= &adapter
->hw
;
3970 u32 tx_space
, min_tx_space
, min_rx_space
;
3971 u32 pba
= adapter
->pba
;
3974 /* reset Packet Buffer Allocation to default */
3977 if (adapter
->max_frame_size
> (VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN
)) {
3978 /* To maintain wire speed transmits, the Tx FIFO should be
3979 * large enough to accommodate two full transmit packets,
3980 * rounded up to the next 1KB and expressed in KB. Likewise,
3981 * the Rx FIFO should be large enough to accommodate at least
3982 * one full receive packet and is similarly rounded up and
3986 /* upper 16 bits has Tx packet buffer allocation size in KB */
3987 tx_space
= pba
>> 16;
3988 /* lower 16 bits has Rx packet buffer allocation size in KB */
3990 /* the Tx fifo also stores 16 bytes of information about the Tx
3991 * but don't include ethernet FCS because hardware appends it
3993 min_tx_space
= (adapter
->max_frame_size
+
3994 sizeof(struct e1000_tx_desc
) - ETH_FCS_LEN
) * 2;
3995 min_tx_space
= ALIGN(min_tx_space
, 1024);
3996 min_tx_space
>>= 10;
3997 /* software strips receive CRC, so leave room for it */
3998 min_rx_space
= adapter
->max_frame_size
;
3999 min_rx_space
= ALIGN(min_rx_space
, 1024);
4000 min_rx_space
>>= 10;
4002 /* If current Tx allocation is less than the min Tx FIFO size,
4003 * and the min Tx FIFO size is less than the current Rx FIFO
4004 * allocation, take space away from current Rx allocation
4006 if ((tx_space
< min_tx_space
) &&
4007 ((min_tx_space
- tx_space
) < pba
)) {
4008 pba
-= min_tx_space
- tx_space
;
4010 /* if short on Rx space, Rx wins and must trump Tx
4013 if (pba
< min_rx_space
)
4020 /* flow control settings
4022 * The high water mark must be low enough to fit one full frame
4023 * (or the size used for early receive) above it in the Rx FIFO.
4024 * Set it to the lower of:
4025 * - 90% of the Rx FIFO size, and
4026 * - the full Rx FIFO size minus one full frame
4028 if (adapter
->flags
& FLAG_DISABLE_FC_PAUSE_TIME
)
4029 fc
->pause_time
= 0xFFFF;
4031 fc
->pause_time
= E1000_FC_PAUSE_TIME
;
4032 fc
->send_xon
= true;
4033 fc
->current_mode
= fc
->requested_mode
;
4035 switch (hw
->mac
.type
) {
4037 case e1000_ich10lan
:
4038 if (adapter
->netdev
->mtu
> ETH_DATA_LEN
) {
4041 fc
->high_water
= 0x2800;
4042 fc
->low_water
= fc
->high_water
- 8;
4047 hwm
= min(((pba
<< 10) * 9 / 10),
4048 ((pba
<< 10) - adapter
->max_frame_size
));
4050 fc
->high_water
= hwm
& E1000_FCRTH_RTH
; /* 8-byte granularity */
4051 fc
->low_water
= fc
->high_water
- 8;
4054 /* Workaround PCH LOM adapter hangs with certain network
4055 * loads. If hangs persist, try disabling Tx flow control.
4057 if (adapter
->netdev
->mtu
> ETH_DATA_LEN
) {
4058 fc
->high_water
= 0x3500;
4059 fc
->low_water
= 0x1500;
4061 fc
->high_water
= 0x5000;
4062 fc
->low_water
= 0x3000;
4064 fc
->refresh_time
= 0x1000;
4074 fc
->refresh_time
= 0xFFFF;
4075 fc
->pause_time
= 0xFFFF;
4077 if (adapter
->netdev
->mtu
<= ETH_DATA_LEN
) {
4078 fc
->high_water
= 0x05C20;
4079 fc
->low_water
= 0x05048;
4085 fc
->high_water
= ((pba
<< 10) * 9 / 10) & E1000_FCRTH_RTH
;
4086 fc
->low_water
= ((pba
<< 10) * 8 / 10) & E1000_FCRTL_RTL
;
4090 /* Alignment of Tx data is on an arbitrary byte boundary with the
4091 * maximum size per Tx descriptor limited only to the transmit
4092 * allocation of the packet buffer minus 96 bytes with an upper
4093 * limit of 24KB due to receive synchronization limitations.
4095 adapter
->tx_fifo_limit
= min_t(u32
, ((er32(PBA
) >> 16) << 10) - 96,
	/* Disable Adaptive Interrupt Moderation if 2 full packets cannot
	 * fit in receive buffer.
	 */
	if (adapter->itr_setting & 0x3) {
		if ((adapter->max_frame_size * 2) > (pba << 10)) {
			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
				dev_info(&adapter->pdev->dev,
					 "Interrupt Throttle Rate off\n");
				adapter->flags2 |= FLAG2_DISABLE_AIM;
				e1000e_write_itr(adapter, 0);
			}
		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
			dev_info(&adapter->pdev->dev,
				 "Interrupt Throttle Rate on\n");
			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
			adapter->itr = 20000;
			e1000e_write_itr(adapter, adapter->itr);
		}
	}

	if (hw->mac.type >= e1000_pch_spt)
		e1000_flush_desc_rings(adapter);
	/* Allow time for pending master requests to run */
	mac->ops.reset_hw(hw);

	/* For parts with AMT enabled, let the firmware know
	 * that the network interface is in control
	 */
	if (adapter->flags & FLAG_HAS_AMT)
		e1000e_get_hw_control(adapter);

	ew32(WUC, 0);

	if (mac->ops.init_hw(hw))
		e_err("Hardware Error\n");

	e1000_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETH_P_8021Q);

	e1000e_reset_adaptive(hw);

	/* restore systim and hwtstamp settings */
	e1000e_systim_reset(adapter);

	/* Set EEE advertisement as appropriate */
	if (adapter->flags2 & FLAG2_HAS_EEE) {
		s32 ret_val;
		u16 adv_addr;

		switch (hw->phy.type) {
		case e1000_phy_82579:
			adv_addr = I82579_EEE_ADVERTISEMENT;
			break;
		case e1000_phy_i217:
			adv_addr = I217_EEE_ADVERTISEMENT;
			break;
		default:
			dev_err(&adapter->pdev->dev,
				"Invalid PHY type setting EEE advertisement\n");
			return;
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val) {
			dev_err(&adapter->pdev->dev,
				"EEE advertisement - unable to acquire PHY\n");
			return;
		}

		e1000_write_emi_reg_locked(hw, adv_addr,
					   hw->dev_spec.ich8lan.eee_disable ?
					   0 : adapter->eee_advert);

		hw->phy.ops.release(hw);
	}

	if (!netif_running(adapter->netdev) &&
	    !test_bit(__E1000_TESTING, &adapter->state))
		e1000_power_down_phy(adapter);

	e1000_get_phy_info(hw);

	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
		u16 phy_data = 0;
		/* speed up time to link by disabling smart power down, ignore
		 * the return value of this function because there is nothing
		 * different we would do if it failed
		 */
		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
		phy_data &= ~IGP02E1000_PM_SPD;
		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
	}
	if (hw->mac.type >= e1000_pch_spt && adapter->int_mode == 0) {
		u32 reg;

		/* Fextnvm7 @ 0xe4[2] = 1 */
		reg = er32(FEXTNVM7);
		reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
		ew32(FEXTNVM7, reg);
		/* Fextnvm9 @ 0x5bb4[13:12] = 11 */
		reg = er32(FEXTNVM9);
		reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
		       E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
		ew32(FEXTNVM9, reg);
	}
}
/**
 * e1000e_trigger_lsc - trigger an LSC interrupt
 * @adapter: board private structure
 *
 * Fire a link status change interrupt to start the watchdog.
 **/
static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries)
		ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
	else
		ew32(ICS, E1000_ICS_LSC);
}
void e1000e_up(struct e1000_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->state);

	if (adapter->msix_entries)
		e1000_configure_msix(adapter);
	e1000_irq_enable(adapter);

	/* Tx queue started by watchdog timer when link is up */

	e1000e_trigger_lsc(adapter);
}
static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & FLAG2_DMA_BURST))
		return;

	/* flush pending descriptor writebacks to memory */
	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);

	/* execute the writes immediately */
	e1e_flush();

	/* due to rare timing issues, write to TIDV/RDTR again to ensure the
	 * write is successful
	 */
	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);

	/* execute the writes immediately */
	e1e_flush();
}

static void e1000e_update_stats(struct e1000_adapter *adapter);
/**
 * e1000e_down - quiesce the device and optionally reset the hardware
 * @adapter: board private structure
 * @reset: boolean flag to reset the hardware or not
 **/
void e1000e_down(struct e1000_adapter *adapter, bool reset)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__E1000_DOWN, &adapter->state);

	netif_carrier_off(netdev);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_stop_queue(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);

	/* flush both disables and wait for them to finish */
	e1e_flush();
	usleep_range(10000, 11000);

	e1000_irq_disable(adapter);

	napi_synchronize(&adapter->napi);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	spin_lock(&adapter->stats64_lock);
	e1000e_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	e1000e_flush_descriptors(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* Disable Si errata workaround on PCHx for jumbo frame flow */
	if ((hw->mac.type >= e1000_pch2lan) &&
	    (adapter->netdev->mtu > ETH_DATA_LEN) &&
	    e1000_lv_jumbo_workaround_ich8lan(hw, false))
		e_dbg("failed to disable jumbo frame workaround mode\n");

	if (!pci_channel_offline(adapter->pdev)) {
		if (reset)
			e1000e_reset(adapter);
		else if (hw->mac.type >= e1000_pch_spt)
			e1000_flush_desc_rings(adapter);
	}
	e1000_clean_tx_ring(adapter->tx_ring);
	e1000_clean_rx_ring(adapter->rx_ring);
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
	might_sleep();
	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
		usleep_range(1000, 1100);
	e1000e_down(adapter, true);
	e1000e_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->state);
}
/**
 * e1000e_sanitize_systim - sanitize raw cycle counter reads
 * @hw: pointer to the HW structure
 * @systim: PHC time value read, sanitized and returned
 * @sts: structure to hold system time before and after reading SYSTIML,
 * may be NULL
 *
 * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
 * check to see that the time is incrementing at a reasonable
 * rate and is a multiple of incvalue.
 **/
static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
				  struct ptp_system_timestamp *sts)
{
	u64 time_delta, rem, temp;
	u64 systim_next;
	u32 incvalue;
	int i;

	incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
	for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
		/* latch SYSTIMH on read of SYSTIML */
		ptp_read_system_prets(sts);
		systim_next = (u64)er32(SYSTIML);
		ptp_read_system_postts(sts);
		systim_next |= (u64)er32(SYSTIMH) << 32;

		time_delta = systim_next - systim;
		temp = time_delta;
		/* VMWare users have seen incvalue of zero, don't div / 0 */
		rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);

		systim = systim_next;

		if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
			break;
	}

	return systim;
}
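/* Illustrative check, not tied to a specific device: if SYSTIM advances by
 * exactly 3 * incvalue between re-reads, rem == 0 and the delta is well
 * below E1000_82574_SYSTIM_EPSILON, so the loop above accepts the value;
 * a bit-flipped read usually violates one of those two conditions and
 * forces another iteration.
 */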
/**
 * e1000e_read_systim - read SYSTIM register
 * @adapter: board private structure
 * @sts: structure which will contain system time before and after reading
 * SYSTIML, may be NULL
 **/
u64 e1000e_read_systim(struct e1000_adapter *adapter,
		       struct ptp_system_timestamp *sts)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 systimel, systimel_2, systimeh;
	u64 systim;
	/* SYSTIMH latching upon SYSTIML read does not work well.
	 * This means that if SYSTIML overflows after we read it but before
	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
	 * will experience a huge non linear increment in the systime value
	 * to fix that we test for overflow and if true, we re-read systime.
	 */
	ptp_read_system_prets(sts);
	systimel = er32(SYSTIML);
	ptp_read_system_postts(sts);
	systimeh = er32(SYSTIMH);
	/* Is systimel so large that overflow is possible? */
	if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
		ptp_read_system_prets(sts);
		systimel_2 = er32(SYSTIML);
		ptp_read_system_postts(sts);
		if (systimel > systimel_2) {
			/* There was an overflow, read again SYSTIMH, and use
			 * systimel_2
			 */
			systimeh = er32(SYSTIMH);
			systimel = systimel_2;
		}
	}
	systim = (u64)systimel;
	systim |= (u64)systimeh << 32;

	if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
		systim = e1000e_sanitize_systim(hw, systim, sts);

	return systim;
}
/**
 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
 * @cc: cyclecounter structure
 **/
static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
{
	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
						     cc);

	return e1000e_read_systim(adapter, NULL);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int e1000_sw_init(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
	adapter->rx_ps_bsize0 = 128;
	adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
	adapter->tx_ring_count = E1000_DEFAULT_TXD;
	adapter->rx_ring_count = E1000_DEFAULT_RXD;

	spin_lock_init(&adapter->stats64_lock);

	e1000e_set_interrupt_capability(adapter);

	if (e1000_alloc_queues(adapter))
		return -ENOMEM;

	/* Setup hardware time stamping cyclecounter */
	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		adapter->cc.read = e1000e_cyclecounter_read;
		adapter->cc.mask = CYCLECOUNTER_MASK(64);
		adapter->cc.mult = 1;
		/* cc.shift set in e1000e_get_base_timinca() */

		spin_lock_init(&adapter->systim_lock);
		INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	set_bit(__E1000_DOWN, &adapter->state);
	return 0;
}
/**
 * e1000_intr_msi_test - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	e_dbg("icr is %08X\n", icr);
	if (icr & E1000_ICR_RXSEQ) {
		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
		/* Force memory writes to complete before acknowledging the
		 * interrupt is handled.
		 */
		wmb();
	}

	return IRQ_HANDLED;
}
/**
 * e1000_test_msi_interrupt - Returns 0 for successful test
 * @adapter: board private struct
 *
 * code flow taken from tg3.c
 **/
static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* poll_enable hasn't been called yet, so don't need disable */
	/* clear any pending events */
	er32(ICR);

	/* free the real vector and request a test handler */
	e1000_free_irq(adapter);
	e1000e_reset_interrupt_capability(adapter);

	/* Assume that the test fails, if it succeeds then the test
	 * MSI irq handler will unset this flag
	 */
	adapter->flags |= FLAG_MSI_TEST_FAILED;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		goto msi_test_failed;

	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
			  netdev->name, netdev);
	if (err) {
		pci_disable_msi(adapter->pdev);
		goto msi_test_failed;
	}

	/* Force memory writes to complete before enabling and firing an
	 * interrupt.
	 */
	wmb();

	e1000_irq_enable(adapter);

	/* fire an unusual interrupt on the test handler */
	ew32(ICS, E1000_ICS_RXSEQ);
	e1e_flush();
	msleep(100);

	e1000_irq_disable(adapter);

	rmb();			/* read flags after interrupt has been fired */

	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
		e_info("MSI interrupt test failed, using legacy interrupt.\n");
	} else {
		e_dbg("MSI interrupt test succeeded!\n");
	}

	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);

msi_test_failed:
	e1000e_set_interrupt_capability(adapter);
	return e1000_request_irq(adapter);
}
/**
 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
 * @adapter: board private struct
 *
 * code flow taken from tg3.c, called with e1000 interrupts disabled.
 **/
static int e1000_test_msi(struct e1000_adapter *adapter)
{
	int err;
	u16 pci_cmd;

	if (!(adapter->flags & FLAG_MSI_ENABLED))
		return 0;

	/* disable SERR in case the MSI write causes a master abort */
	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_SERR)
		pci_write_config_word(adapter->pdev, PCI_COMMAND,
				      pci_cmd & ~PCI_COMMAND_SERR);

	err = e1000_test_msi_interrupt(adapter);

	/* re-enable SERR */
	if (pci_cmd & PCI_COMMAND_SERR) {
		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd |= PCI_COMMAND_SERR;
		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
	}

	return err;
}
4609 * e1000e_open - Called when a network interface is made active
4610 * @netdev: network interface device structure
4612 * Returns 0 on success, negative value on failure
4614 * The open entry point is called when a network interface is made
4615 * active by the system (IFF_UP). At this point all resources needed
4616 * for transmit and receive operations are allocated, the interrupt
4617 * handler is registered with the OS, the watchdog timer is started,
4618 * and the stack is notified that the interface is ready.
4620 int e1000e_open(struct net_device
*netdev
)
4622 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4623 struct e1000_hw
*hw
= &adapter
->hw
;
4624 struct pci_dev
*pdev
= adapter
->pdev
;
4627 /* disallow open during test */
4628 if (test_bit(__E1000_TESTING
, &adapter
->state
))
4631 pm_runtime_get_sync(&pdev
->dev
);
4633 netif_carrier_off(netdev
);
4634 netif_stop_queue(netdev
);
4636 /* allocate transmit descriptors */
4637 err
= e1000e_setup_tx_resources(adapter
->tx_ring
);
4641 /* allocate receive descriptors */
4642 err
= e1000e_setup_rx_resources(adapter
->rx_ring
);
4646 /* If AMT is enabled, let the firmware know that the network
4647 * interface is now open and reset the part to a known state.
4649 if (adapter
->flags
& FLAG_HAS_AMT
) {
4650 e1000e_get_hw_control(adapter
);
4651 e1000e_reset(adapter
);
4654 e1000e_power_up_phy(adapter
);
4656 adapter
->mng_vlan_id
= E1000_MNG_VLAN_NONE
;
4657 if ((adapter
->hw
.mng_cookie
.status
& E1000_MNG_DHCP_COOKIE_STATUS_VLAN
))
4658 e1000_update_mng_vlan(adapter
);
4660 /* DMA latency requirement to workaround jumbo issue */
4661 cpu_latency_qos_add_request(&adapter
->pm_qos_req
, PM_QOS_DEFAULT_VALUE
);
4663 /* before we allocate an interrupt, we must be ready to handle it.
4664 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4665 * as soon as we call pci_request_irq, so we have to setup our
4666 * clean_rx handler before we do so.
4668 e1000_configure(adapter
);
4670 err
= e1000_request_irq(adapter
);
4674 /* Work around PCIe errata with MSI interrupts causing some chipsets to
4675 * ignore e1000e MSI messages, which means we need to test our MSI
4678 if (adapter
->int_mode
!= E1000E_INT_MODE_LEGACY
) {
4679 err
= e1000_test_msi(adapter
);
4681 e_err("Interrupt allocation failed\n");
4686 /* From here on the code is the same as e1000e_up() */
4687 clear_bit(__E1000_DOWN
, &adapter
->state
);
4689 napi_enable(&adapter
->napi
);
4691 e1000_irq_enable(adapter
);
4693 adapter
->tx_hang_recheck
= false;
4695 hw
->mac
.get_link_status
= true;
4696 pm_runtime_put(&pdev
->dev
);
4698 e1000e_trigger_lsc(adapter
);
4703 cpu_latency_qos_remove_request(&adapter
->pm_qos_req
);
4704 e1000e_release_hw_control(adapter
);
4705 e1000_power_down_phy(adapter
);
4706 e1000e_free_rx_resources(adapter
->rx_ring
);
4708 e1000e_free_tx_resources(adapter
->tx_ring
);
4710 e1000e_reset(adapter
);
4711 pm_runtime_put_sync(&pdev
->dev
);
4717 * e1000e_close - Disables a network interface
4718 * @netdev: network interface device structure
4720 * Returns 0, this is not allowed to fail
4722 * The close entry point is called when an interface is de-activated
4723 * by the OS. The hardware is still under the drivers control, but
4724 * needs to be disabled. A global MAC reset is issued to stop the
4725 * hardware, and all transmit and receive resources are freed.
4727 int e1000e_close(struct net_device
*netdev
)
4729 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4730 struct pci_dev
*pdev
= adapter
->pdev
;
4731 int count
= E1000_CHECK_RESET_COUNT
;
4733 while (test_bit(__E1000_RESETTING
, &adapter
->state
) && count
--)
4734 usleep_range(10000, 11000);
4736 WARN_ON(test_bit(__E1000_RESETTING
, &adapter
->state
));
4738 pm_runtime_get_sync(&pdev
->dev
);
4740 if (netif_device_present(netdev
)) {
4741 e1000e_down(adapter
, true);
4742 e1000_free_irq(adapter
);
4744 /* Link status message must follow this format */
4745 netdev_info(netdev
, "NIC Link is Down\n");
4748 napi_disable(&adapter
->napi
);
4750 e1000e_free_tx_resources(adapter
->tx_ring
);
4751 e1000e_free_rx_resources(adapter
->rx_ring
);
4753 /* kill manageability vlan ID if supported, but not if a vlan with
4754 * the same ID is registered on the host OS (let 8021q kill it)
4756 if (adapter
->hw
.mng_cookie
.status
& E1000_MNG_DHCP_COOKIE_STATUS_VLAN
)
4757 e1000_vlan_rx_kill_vid(netdev
, htons(ETH_P_8021Q
),
4758 adapter
->mng_vlan_id
);
4760 /* If AMT is enabled, let the firmware know that the network
4761 * interface is now closed
4763 if ((adapter
->flags
& FLAG_HAS_AMT
) &&
4764 !test_bit(__E1000_TESTING
, &adapter
->state
))
4765 e1000e_release_hw_control(adapter
);
4767 cpu_latency_qos_remove_request(&adapter
->pm_qos_req
);
4769 pm_runtime_put_sync(&pdev
->dev
);
4775 * e1000_set_mac - Change the Ethernet Address of the NIC
4776 * @netdev: network interface device structure
4777 * @p: pointer to an address structure
4779 * Returns 0 on success, negative on failure
4781 static int e1000_set_mac(struct net_device
*netdev
, void *p
)
4783 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
4784 struct e1000_hw
*hw
= &adapter
->hw
;
4785 struct sockaddr
*addr
= p
;
4787 if (!is_valid_ether_addr(addr
->sa_data
))
4788 return -EADDRNOTAVAIL
;
4790 memcpy(netdev
->dev_addr
, addr
->sa_data
, netdev
->addr_len
);
4791 memcpy(adapter
->hw
.mac
.addr
, addr
->sa_data
, netdev
->addr_len
);
4793 hw
->mac
.ops
.rar_set(&adapter
->hw
, adapter
->hw
.mac
.addr
, 0);
4795 if (adapter
->flags
& FLAG_RESET_OVERWRITES_LAA
) {
4796 /* activate the work around */
4797 e1000e_set_laa_state_82571(&adapter
->hw
, 1);
4799 /* Hold a copy of the LAA in RAR[14] This is done so that
4800 * between the time RAR[0] gets clobbered and the time it
4801 * gets fixed (in e1000_watchdog), the actual LAA is in one
4802 * of the RARs and no incoming packets directed to this port
4803 * are dropped. Eventually the LAA will be in RAR[0] and
4806 hw
->mac
.ops
.rar_set(&adapter
->hw
, adapter
->hw
.mac
.addr
,
4807 adapter
->hw
.mac
.rar_entry_count
- 1);
4814 * e1000e_update_phy_task - work thread to update phy
4815 * @work: pointer to our work struct
4817 * this worker thread exists because we must acquire a
4818 * semaphore to read the phy, which we could msleep while
4819 * waiting for it, and we can't msleep in a timer.
4821 static void e1000e_update_phy_task(struct work_struct
*work
)
4823 struct e1000_adapter
*adapter
= container_of(work
,
4824 struct e1000_adapter
,
4826 struct e1000_hw
*hw
= &adapter
->hw
;
4828 if (test_bit(__E1000_DOWN
, &adapter
->state
))
4831 e1000_get_phy_info(hw
);
4833 /* Enable EEE on 82579 after link up */
4834 if (hw
->phy
.type
>= e1000_phy_82579
)
4835 e1000_set_eee_pchlan(hw
);
4839 * e1000_update_phy_info - timre call-back to update PHY info
4840 * @t: pointer to timer_list containing private info adapter
4842 * Need to wait a few seconds after link up to get diagnostic information from
4845 static void e1000_update_phy_info(struct timer_list
*t
)
4847 struct e1000_adapter
*adapter
= from_timer(adapter
, t
, phy_info_timer
);
4849 if (test_bit(__E1000_DOWN
, &adapter
->state
))
4852 schedule_work(&adapter
->update_phy_task
);
4856 * e1000e_update_phy_stats - Update the PHY statistics counters
4857 * @adapter: board private structure
4859 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4861 static void e1000e_update_phy_stats(struct e1000_adapter
*adapter
)
4863 struct e1000_hw
*hw
= &adapter
->hw
;
4867 ret_val
= hw
->phy
.ops
.acquire(hw
);
4871 /* A page set is expensive so check if already on desired page.
4872 * If not, set to the page with the PHY status registers.
4875 ret_val
= e1000e_read_phy_reg_mdic(hw
, IGP01E1000_PHY_PAGE_SELECT
,
4879 if (phy_data
!= (HV_STATS_PAGE
<< IGP_PAGE_SHIFT
)) {
4880 ret_val
= hw
->phy
.ops
.set_page(hw
,
4881 HV_STATS_PAGE
<< IGP_PAGE_SHIFT
);
4886 /* Single Collision Count */
4887 hw
->phy
.ops
.read_reg_page(hw
, HV_SCC_UPPER
, &phy_data
);
4888 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_SCC_LOWER
, &phy_data
);
4890 adapter
->stats
.scc
+= phy_data
;
4892 /* Excessive Collision Count */
4893 hw
->phy
.ops
.read_reg_page(hw
, HV_ECOL_UPPER
, &phy_data
);
4894 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_ECOL_LOWER
, &phy_data
);
4896 adapter
->stats
.ecol
+= phy_data
;
4898 /* Multiple Collision Count */
4899 hw
->phy
.ops
.read_reg_page(hw
, HV_MCC_UPPER
, &phy_data
);
4900 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_MCC_LOWER
, &phy_data
);
4902 adapter
->stats
.mcc
+= phy_data
;
4904 /* Late Collision Count */
4905 hw
->phy
.ops
.read_reg_page(hw
, HV_LATECOL_UPPER
, &phy_data
);
4906 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_LATECOL_LOWER
, &phy_data
);
4908 adapter
->stats
.latecol
+= phy_data
;
4910 /* Collision Count - also used for adaptive IFS */
4911 hw
->phy
.ops
.read_reg_page(hw
, HV_COLC_UPPER
, &phy_data
);
4912 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_COLC_LOWER
, &phy_data
);
4914 hw
->mac
.collision_delta
= phy_data
;
4917 hw
->phy
.ops
.read_reg_page(hw
, HV_DC_UPPER
, &phy_data
);
4918 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_DC_LOWER
, &phy_data
);
4920 adapter
->stats
.dc
+= phy_data
;
4922 /* Transmit with no CRS */
4923 hw
->phy
.ops
.read_reg_page(hw
, HV_TNCRS_UPPER
, &phy_data
);
4924 ret_val
= hw
->phy
.ops
.read_reg_page(hw
, HV_TNCRS_LOWER
, &phy_data
);
4926 adapter
->stats
.tncrs
+= phy_data
;
4929 hw
->phy
.ops
.release(hw
);
4933 * e1000e_update_stats - Update the board statistics counters
4934 * @adapter: board private structure
4936 static void e1000e_update_stats(struct e1000_adapter
*adapter
)
4938 struct net_device
*netdev
= adapter
->netdev
;
4939 struct e1000_hw
*hw
= &adapter
->hw
;
4940 struct pci_dev
*pdev
= adapter
->pdev
;
4942 /* Prevent stats update while adapter is being reset, or if the pci
4943 * connection is down.
4945 if (adapter
->link_speed
== 0)
4947 if (pci_channel_offline(pdev
))
4950 adapter
->stats
.crcerrs
+= er32(CRCERRS
);
4951 adapter
->stats
.gprc
+= er32(GPRC
);
4952 adapter
->stats
.gorc
+= er32(GORCL
);
4953 er32(GORCH
); /* Clear gorc */
4954 adapter
->stats
.bprc
+= er32(BPRC
);
4955 adapter
->stats
.mprc
+= er32(MPRC
);
4956 adapter
->stats
.roc
+= er32(ROC
);
4958 adapter
->stats
.mpc
+= er32(MPC
);
4960 /* Half-duplex statistics */
4961 if (adapter
->link_duplex
== HALF_DUPLEX
) {
4962 if (adapter
->flags2
& FLAG2_HAS_PHY_STATS
) {
4963 e1000e_update_phy_stats(adapter
);
4965 adapter
->stats
.scc
+= er32(SCC
);
4966 adapter
->stats
.ecol
+= er32(ECOL
);
4967 adapter
->stats
.mcc
+= er32(MCC
);
4968 adapter
->stats
.latecol
+= er32(LATECOL
);
4969 adapter
->stats
.dc
+= er32(DC
);
4971 hw
->mac
.collision_delta
= er32(COLC
);
4973 if ((hw
->mac
.type
!= e1000_82574
) &&
4974 (hw
->mac
.type
!= e1000_82583
))
4975 adapter
->stats
.tncrs
+= er32(TNCRS
);
4977 adapter
->stats
.colc
+= hw
->mac
.collision_delta
;
4980 adapter
->stats
.xonrxc
+= er32(XONRXC
);
4981 adapter
->stats
.xontxc
+= er32(XONTXC
);
4982 adapter
->stats
.xoffrxc
+= er32(XOFFRXC
);
4983 adapter
->stats
.xofftxc
+= er32(XOFFTXC
);
4984 adapter
->stats
.gptc
+= er32(GPTC
);
4985 adapter
->stats
.gotc
+= er32(GOTCL
);
4986 er32(GOTCH
); /* Clear gotc */
4987 adapter
->stats
.rnbc
+= er32(RNBC
);
4988 adapter
->stats
.ruc
+= er32(RUC
);
4990 adapter
->stats
.mptc
+= er32(MPTC
);
4991 adapter
->stats
.bptc
+= er32(BPTC
);
4993 /* used for adaptive IFS */
4995 hw
->mac
.tx_packet_delta
= er32(TPT
);
4996 adapter
->stats
.tpt
+= hw
->mac
.tx_packet_delta
;
4998 adapter
->stats
.algnerrc
+= er32(ALGNERRC
);
4999 adapter
->stats
.rxerrc
+= er32(RXERRC
);
5000 adapter
->stats
.cexterr
+= er32(CEXTERR
);
5001 adapter
->stats
.tsctc
+= er32(TSCTC
);
5002 adapter
->stats
.tsctfc
+= er32(TSCTFC
);
5004 /* Fill out the OS statistics structure */
5005 netdev
->stats
.multicast
= adapter
->stats
.mprc
;
5006 netdev
->stats
.collisions
= adapter
->stats
.colc
;
5010 /* RLEC on some newer hardware can be incorrect so build
5011 * our own version based on RUC and ROC
5013 netdev
->stats
.rx_errors
= adapter
->stats
.rxerrc
+
5014 adapter
->stats
.crcerrs
+ adapter
->stats
.algnerrc
+
5015 adapter
->stats
.ruc
+ adapter
->stats
.roc
+ adapter
->stats
.cexterr
;
5016 netdev
->stats
.rx_length_errors
= adapter
->stats
.ruc
+
5018 netdev
->stats
.rx_crc_errors
= adapter
->stats
.crcerrs
;
5019 netdev
->stats
.rx_frame_errors
= adapter
->stats
.algnerrc
;
5020 netdev
->stats
.rx_missed_errors
= adapter
->stats
.mpc
;
5023 netdev
->stats
.tx_errors
= adapter
->stats
.ecol
+ adapter
->stats
.latecol
;
5024 netdev
->stats
.tx_aborted_errors
= adapter
->stats
.ecol
;
5025 netdev
->stats
.tx_window_errors
= adapter
->stats
.latecol
;
5026 netdev
->stats
.tx_carrier_errors
= adapter
->stats
.tncrs
;
5028 /* Tx Dropped needs to be maintained elsewhere */
5030 /* Management Stats */
5031 adapter
->stats
.mgptc
+= er32(MGTPTC
);
5032 adapter
->stats
.mgprc
+= er32(MGTPRC
);
5033 adapter
->stats
.mgpdc
+= er32(MGTPDC
);
5035 /* Correctable ECC Errors */
5036 if (hw
->mac
.type
>= e1000_pch_lpt
) {
5037 u32 pbeccsts
= er32(PBECCSTS
);
5039 adapter
->corr_errors
+=
5040 pbeccsts
& E1000_PBECCSTS_CORR_ERR_CNT_MASK
;
5041 adapter
->uncorr_errors
+=
5042 (pbeccsts
& E1000_PBECCSTS_UNCORR_ERR_CNT_MASK
) >>
5043 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT
;
5048 * e1000_phy_read_status - Update the PHY register status snapshot
5049 * @adapter: board private structure
5051 static void e1000_phy_read_status(struct e1000_adapter
*adapter
)
5053 struct e1000_hw
*hw
= &adapter
->hw
;
5054 struct e1000_phy_regs
*phy
= &adapter
->phy_regs
;
5056 if (!pm_runtime_suspended((&adapter
->pdev
->dev
)->parent
) &&
5057 (er32(STATUS
) & E1000_STATUS_LU
) &&
5058 (adapter
->hw
.phy
.media_type
== e1000_media_type_copper
)) {
5061 ret_val
= e1e_rphy(hw
, MII_BMCR
, &phy
->bmcr
);
5062 ret_val
|= e1e_rphy(hw
, MII_BMSR
, &phy
->bmsr
);
5063 ret_val
|= e1e_rphy(hw
, MII_ADVERTISE
, &phy
->advertise
);
5064 ret_val
|= e1e_rphy(hw
, MII_LPA
, &phy
->lpa
);
5065 ret_val
|= e1e_rphy(hw
, MII_EXPANSION
, &phy
->expansion
);
5066 ret_val
|= e1e_rphy(hw
, MII_CTRL1000
, &phy
->ctrl1000
);
5067 ret_val
|= e1e_rphy(hw
, MII_STAT1000
, &phy
->stat1000
);
5068 ret_val
|= e1e_rphy(hw
, MII_ESTATUS
, &phy
->estatus
);
5070 e_warn("Error reading PHY register\n");
5072 /* Do not read PHY registers if link is not up
5073 * Set values to typical power-on defaults
5075 phy
->bmcr
= (BMCR_SPEED1000
| BMCR_ANENABLE
| BMCR_FULLDPLX
);
5076 phy
->bmsr
= (BMSR_100FULL
| BMSR_100HALF
| BMSR_10FULL
|
5077 BMSR_10HALF
| BMSR_ESTATEN
| BMSR_ANEGCAPABLE
|
5079 phy
->advertise
= (ADVERTISE_PAUSE_ASYM
| ADVERTISE_PAUSE_CAP
|
5080 ADVERTISE_ALL
| ADVERTISE_CSMA
);
5082 phy
->expansion
= EXPANSION_ENABLENPAGE
;
5083 phy
->ctrl1000
= ADVERTISE_1000FULL
;
5085 phy
->estatus
= (ESTATUS_1000_TFULL
| ESTATUS_1000_THALF
);
5089 static void e1000_print_link_info(struct e1000_adapter
*adapter
)
5091 struct e1000_hw
*hw
= &adapter
->hw
;
5092 u32 ctrl
= er32(CTRL
);
5094 /* Link status message must follow this format for user tools */
5095 netdev_info(adapter
->netdev
,
5096 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5097 adapter
->link_speed
,
5098 adapter
->link_duplex
== FULL_DUPLEX
? "Full" : "Half",
5099 (ctrl
& E1000_CTRL_TFCE
) && (ctrl
& E1000_CTRL_RFCE
) ? "Rx/Tx" :
5100 (ctrl
& E1000_CTRL_RFCE
) ? "Rx" :
5101 (ctrl
& E1000_CTRL_TFCE
) ? "Tx" : "None");
5104 static bool e1000e_has_link(struct e1000_adapter
*adapter
)
5106 struct e1000_hw
*hw
= &adapter
->hw
;
5107 bool link_active
= false;
5110 /* get_link_status is set on LSC (link status) interrupt or
5111 * Rx sequence error interrupt. get_link_status will stay
5112 * true until the check_for_link establishes link
5113 * for copper adapters ONLY
5115 switch (hw
->phy
.media_type
) {
5116 case e1000_media_type_copper
:
5117 if (hw
->mac
.get_link_status
) {
5118 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
5119 link_active
= !hw
->mac
.get_link_status
;
5124 case e1000_media_type_fiber
:
5125 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
5126 link_active
= !!(er32(STATUS
) & E1000_STATUS_LU
);
5128 case e1000_media_type_internal_serdes
:
5129 ret_val
= hw
->mac
.ops
.check_for_link(hw
);
5130 link_active
= hw
->mac
.serdes_has_link
;
5133 case e1000_media_type_unknown
:
5137 if ((ret_val
== -E1000_ERR_PHY
) && (hw
->phy
.type
== e1000_phy_igp_3
) &&
5138 (er32(CTRL
) & E1000_PHY_CTRL_GBE_DISABLE
)) {
5139 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
5140 e_info("Gigabit has been disabled, downgrading speed\n");
5146 static void e1000e_enable_receives(struct e1000_adapter
*adapter
)
5148 /* make sure the receive unit is started */
5149 if ((adapter
->flags
& FLAG_RX_NEEDS_RESTART
) &&
5150 (adapter
->flags
& FLAG_RESTART_NOW
)) {
5151 struct e1000_hw
*hw
= &adapter
->hw
;
5152 u32 rctl
= er32(RCTL
);
5154 ew32(RCTL
, rctl
| E1000_RCTL_EN
);
5155 adapter
->flags
&= ~FLAG_RESTART_NOW
;
5159 static void e1000e_check_82574_phy_workaround(struct e1000_adapter
*adapter
)
5161 struct e1000_hw
*hw
= &adapter
->hw
;
5163 /* With 82574 controllers, PHY needs to be checked periodically
5164 * for hung state and reset, if two calls return true
5166 if (e1000_check_phy_82574(hw
))
5167 adapter
->phy_hang_count
++;
5169 adapter
->phy_hang_count
= 0;
5171 if (adapter
->phy_hang_count
> 1) {
5172 adapter
->phy_hang_count
= 0;
5173 e_dbg("PHY appears hung - resetting\n");
5174 schedule_work(&adapter
->reset_task
);
	}
}

/**
 * e1000_watchdog - Timer Call-back
 * @t: pointer to timer_list containing private info adapter
 **/
static void e1000_watchdog(struct timer_list *t)
{
	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);

	/* TODO: make this use queue_delayed_work() */
}
5192 static void e1000_watchdog_task(struct work_struct
*work
)
5194 struct e1000_adapter
*adapter
= container_of(work
,
5195 struct e1000_adapter
,
5197 struct net_device
*netdev
= adapter
->netdev
;
5198 struct e1000_mac_info
*mac
= &adapter
->hw
.mac
;
5199 struct e1000_phy_info
*phy
= &adapter
->hw
.phy
;
5200 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
5201 u32 dmoff_exit_timeout
= 100, tries
= 0;
5202 struct e1000_hw
*hw
= &adapter
->hw
;
5203 u32 link
, tctl
, pcim_state
;
5205 if (test_bit(__E1000_DOWN
, &adapter
->state
))
5208 link
= e1000e_has_link(adapter
);
5209 if ((netif_carrier_ok(netdev
)) && link
) {
5210 /* Cancel scheduled suspend requests. */
5211 pm_runtime_resume(netdev
->dev
.parent
);
5213 e1000e_enable_receives(adapter
);
5217 if ((e1000e_enable_tx_pkt_filtering(hw
)) &&
5218 (adapter
->mng_vlan_id
!= adapter
->hw
.mng_cookie
.vlan_id
))
5219 e1000_update_mng_vlan(adapter
);
5222 if (!netif_carrier_ok(netdev
)) {
5225 /* Cancel scheduled suspend requests. */
5226 pm_runtime_resume(netdev
->dev
.parent
);
5228 /* Checking if MAC is in DMoff state*/
5229 if (er32(FWSM
) & E1000_ICH_FWSM_FW_VALID
) {
5230 pcim_state
= er32(STATUS
);
5231 while (pcim_state
& E1000_STATUS_PCIM_STATE
) {
5232 if (tries
++ == dmoff_exit_timeout
) {
5233 e_dbg("Error in exiting dmoff\n");
5236 usleep_range(10000, 20000);
5237 pcim_state
= er32(STATUS
);
5239 /* Checking if MAC exited DMoff state */
5240 if (!(pcim_state
& E1000_STATUS_PCIM_STATE
))
5241 e1000_phy_hw_reset(&adapter
->hw
);
5245 /* update snapshot of PHY registers on LSC */
5246 e1000_phy_read_status(adapter
);
5247 mac
->ops
.get_link_up_info(&adapter
->hw
,
5248 &adapter
->link_speed
,
5249 &adapter
->link_duplex
);
5250 e1000_print_link_info(adapter
);
5252 /* check if SmartSpeed worked */
5253 e1000e_check_downshift(hw
);
5254 if (phy
->speed_downgraded
)
5256 "Link Speed was downgraded by SmartSpeed\n");
5258 /* On supported PHYs, check for duplex mismatch only
5259 * if link has autonegotiated at 10/100 half
5261 if ((hw
->phy
.type
== e1000_phy_igp_3
||
5262 hw
->phy
.type
== e1000_phy_bm
) &&
5264 (adapter
->link_speed
== SPEED_10
||
5265 adapter
->link_speed
== SPEED_100
) &&
5266 (adapter
->link_duplex
== HALF_DUPLEX
)) {
5269 e1e_rphy(hw
, MII_EXPANSION
, &autoneg_exp
);
5271 if (!(autoneg_exp
& EXPANSION_NWAY
))
5272 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
5275 /* adjust timeout factor according to speed/duplex */
5276 adapter
->tx_timeout_factor
= 1;
5277 switch (adapter
->link_speed
) {
5280 adapter
->tx_timeout_factor
= 16;
5284 adapter
->tx_timeout_factor
= 10;
5288 /* workaround: re-program speed mode bit after
5291 if ((adapter
->flags
& FLAG_TARC_SPEED_MODE_BIT
) &&
5295 tarc0
= er32(TARC(0));
5296 tarc0
&= ~SPEED_MODE_BIT
;
5297 ew32(TARC(0), tarc0
);
5300 /* disable TSO for pcie and 10/100 speeds, to avoid
5301 * some hardware issues
5303 if (!(adapter
->flags
& FLAG_TSO_FORCE
)) {
5304 switch (adapter
->link_speed
) {
5307 e_info("10/100 speed: disabling TSO\n");
5308 netdev
->features
&= ~NETIF_F_TSO
;
5309 netdev
->features
&= ~NETIF_F_TSO6
;
5312 netdev
->features
|= NETIF_F_TSO
;
5313 netdev
->features
|= NETIF_F_TSO6
;
5319 if (hw
->mac
.type
== e1000_pch_spt
) {
5320 netdev
->features
&= ~NETIF_F_TSO
;
5321 netdev
->features
&= ~NETIF_F_TSO6
;
5325 /* enable transmits in the hardware, need to do this
5326 * after setting TARC(0)
5329 tctl
|= E1000_TCTL_EN
;
5332 /* Perform any post-link-up configuration before
5333 * reporting link up.
5335 if (phy
->ops
.cfg_on_link_up
)
5336 phy
->ops
.cfg_on_link_up(hw
);
5338 netif_wake_queue(netdev
);
5339 netif_carrier_on(netdev
);
5341 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
5342 mod_timer(&adapter
->phy_info_timer
,
5343 round_jiffies(jiffies
+ 2 * HZ
));
5346 if (netif_carrier_ok(netdev
)) {
5347 adapter
->link_speed
= 0;
5348 adapter
->link_duplex
= 0;
5349 /* Link status message must follow this format */
5350 netdev_info(netdev
, "NIC Link is Down\n");
5351 netif_carrier_off(netdev
);
5352 netif_stop_queue(netdev
);
5353 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
5354 mod_timer(&adapter
->phy_info_timer
,
5355 round_jiffies(jiffies
+ 2 * HZ
));
5357 /* 8000ES2LAN requires a Rx packet buffer work-around
5358 * on link down event; reset the controller to flush
5359 * the Rx packet buffer.
5361 if (adapter
->flags
& FLAG_RX_NEEDS_RESTART
)
5362 adapter
->flags
|= FLAG_RESTART_NOW
;
5364 pm_schedule_suspend(netdev
->dev
.parent
,
5370 spin_lock(&adapter
->stats64_lock
);
5371 e1000e_update_stats(adapter
);
5373 mac
->tx_packet_delta
= adapter
->stats
.tpt
- adapter
->tpt_old
;
5374 adapter
->tpt_old
= adapter
->stats
.tpt
;
5375 mac
->collision_delta
= adapter
->stats
.colc
- adapter
->colc_old
;
5376 adapter
->colc_old
= adapter
->stats
.colc
;
5378 adapter
->gorc
= adapter
->stats
.gorc
- adapter
->gorc_old
;
5379 adapter
->gorc_old
= adapter
->stats
.gorc
;
5380 adapter
->gotc
= adapter
->stats
.gotc
- adapter
->gotc_old
;
5381 adapter
->gotc_old
= adapter
->stats
.gotc
;
5382 spin_unlock(&adapter
->stats64_lock
);
5384 /* If the link is lost the controller stops DMA, but
5385 * if there is queued Tx work it cannot be done. So
5386 * reset the controller to flush the Tx packet buffers.
5388 if (!netif_carrier_ok(netdev
) &&
5389 (e1000_desc_unused(tx_ring
) + 1 < tx_ring
->count
))
5390 adapter
->flags
|= FLAG_RESTART_NOW
;
5392 /* If reset is necessary, do it outside of interrupt context. */
5393 if (adapter
->flags
& FLAG_RESTART_NOW
) {
5394 schedule_work(&adapter
->reset_task
);
5395 /* return immediately since reset is imminent */
5399 e1000e_update_adaptive(&adapter
->hw
);
5401 /* Simple mode for Interrupt Throttle Rate (ITR) */
5402 if (adapter
->itr_setting
== 4) {
5403 /* Symmetric Tx/Rx gets a reduced ITR=2000;
5404 * Total asymmetrical Tx or Rx gets ITR=8000;
5405 * everyone else is between 2000-8000.
5407 u32 goc
= (adapter
->gotc
+ adapter
->gorc
) / 10000;
5408 u32 dif
= (adapter
->gotc
> adapter
->gorc
?
5409 adapter
->gotc
- adapter
->gorc
:
5410 adapter
->gorc
- adapter
->gotc
) / 10000;
5411 u32 itr
= goc
> 0 ? (dif
* 6000 / goc
+ 2000) : 8000;
5413 e1000e_write_itr(adapter
, itr
);
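/* Worked example with illustrative traffic only: with gotc = 50,000,000 and
 * gorc = 10,000,000 bytes over the sampling interval, goc = 6000 and
 * dif = 4000, so itr = 4000 * 6000 / 6000 + 2000 = 6000, i.e. between the
 * symmetric (2000) and fully asymmetric (8000) endpoints.
 */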
5416 /* Cause software interrupt to ensure Rx ring is cleaned */
5417 if (adapter
->msix_entries
)
5418 ew32(ICS
, adapter
->rx_ring
->ims_val
);
5420 ew32(ICS
, E1000_ICS_RXDMT0
);
5422 /* flush pending descriptors to memory before detecting Tx hang */
5423 e1000e_flush_descriptors(adapter
);
5425 /* Force detection of hung controller every watchdog period */
5426 adapter
->detect_tx_hung
= true;
5428 /* With 82571 controllers, LAA may be overwritten due to controller
5429 * reset from the other port. Set the appropriate LAA in RAR[0]
5431 if (e1000e_get_laa_state_82571(hw
))
5432 hw
->mac
.ops
.rar_set(hw
, adapter
->hw
.mac
.addr
, 0);
5434 if (adapter
->flags2
& FLAG2_CHECK_PHY_HANG
)
5435 e1000e_check_82574_phy_workaround(adapter
);
5437 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5438 if (adapter
->hwtstamp_config
.rx_filter
!= HWTSTAMP_FILTER_NONE
) {
5439 if ((adapter
->flags2
& FLAG2_CHECK_RX_HWTSTAMP
) &&
5440 (er32(TSYNCRXCTL
) & E1000_TSYNCRXCTL_VALID
)) {
5442 adapter
->rx_hwtstamp_cleared
++;
5444 adapter
->flags2
|= FLAG2_CHECK_RX_HWTSTAMP
;
5448 /* Reset the timer */
5449 if (!test_bit(__E1000_DOWN
, &adapter
->state
))
5450 mod_timer(&adapter
->watchdog_timer
,
5451 round_jiffies(jiffies
+ 2 * HZ
));
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_HWTSTAMP		0x00000020
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
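/* The VLAN tag travels in the upper 16 bits of tx_flags: for example
 * (hypothetical tag value), a tag of 0x0064 shifted left by
 * E1000_TX_FLAGS_VLAN_SHIFT becomes 0x00640000, which is exactly the
 * region selected by E1000_TX_FLAGS_VLAN_MASK.
 */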
5463 static int e1000_tso(struct e1000_ring
*tx_ring
, struct sk_buff
*skb
,
5466 struct e1000_context_desc
*context_desc
;
5467 struct e1000_buffer
*buffer_info
;
5471 u8 ipcss
, ipcso
, tucss
, tucso
, hdr_len
;
5474 if (!skb_is_gso(skb
))
5477 err
= skb_cow_head(skb
, 0);
5481 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
5482 mss
= skb_shinfo(skb
)->gso_size
;
5483 if (protocol
== htons(ETH_P_IP
)) {
5484 struct iphdr
*iph
= ip_hdr(skb
);
5487 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
, iph
->daddr
,
5489 cmd_length
= E1000_TXD_CMD_IP
;
5490 ipcse
= skb_transport_offset(skb
) - 1;
5491 } else if (skb_is_gso_v6(skb
)) {
5492 tcp_v6_gso_csum_prep(skb
);
5495 ipcss
= skb_network_offset(skb
);
5496 ipcso
= (void *)&(ip_hdr(skb
)->check
) - (void *)skb
->data
;
5497 tucss
= skb_transport_offset(skb
);
5498 tucso
= (void *)&(tcp_hdr(skb
)->check
) - (void *)skb
->data
;
5500 cmd_length
|= (E1000_TXD_CMD_DEXT
| E1000_TXD_CMD_TSE
|
5501 E1000_TXD_CMD_TCP
| (skb
->len
- (hdr_len
)));
5503 i
= tx_ring
->next_to_use
;
5504 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
5505 buffer_info
= &tx_ring
->buffer_info
[i
];
5507 context_desc
->lower_setup
.ip_fields
.ipcss
= ipcss
;
5508 context_desc
->lower_setup
.ip_fields
.ipcso
= ipcso
;
5509 context_desc
->lower_setup
.ip_fields
.ipcse
= cpu_to_le16(ipcse
);
5510 context_desc
->upper_setup
.tcp_fields
.tucss
= tucss
;
5511 context_desc
->upper_setup
.tcp_fields
.tucso
= tucso
;
5512 context_desc
->upper_setup
.tcp_fields
.tucse
= 0;
5513 context_desc
->tcp_seg_setup
.fields
.mss
= cpu_to_le16(mss
);
5514 context_desc
->tcp_seg_setup
.fields
.hdr_len
= hdr_len
;
5515 context_desc
->cmd_and_length
= cpu_to_le32(cmd_length
);
5517 buffer_info
->time_stamp
= jiffies
;
5518 buffer_info
->next_to_watch
= i
;
5521 if (i
== tx_ring
->count
)
5523 tx_ring
->next_to_use
= i
;
5528 static bool e1000_tx_csum(struct e1000_ring
*tx_ring
, struct sk_buff
*skb
,
5531 struct e1000_adapter
*adapter
= tx_ring
->adapter
;
5532 struct e1000_context_desc
*context_desc
;
5533 struct e1000_buffer
*buffer_info
;
5536 u32 cmd_len
= E1000_TXD_CMD_DEXT
;
5538 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
5542 case cpu_to_be16(ETH_P_IP
):
5543 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
5544 cmd_len
|= E1000_TXD_CMD_TCP
;
5546 case cpu_to_be16(ETH_P_IPV6
):
5547 /* XXX not handling all IPV6 headers */
5548 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
5549 cmd_len
|= E1000_TXD_CMD_TCP
;
5552 if (unlikely(net_ratelimit()))
5553 e_warn("checksum_partial proto=%x!\n",
5554 be16_to_cpu(protocol
));
5558 css
= skb_checksum_start_offset(skb
);
5560 i
= tx_ring
->next_to_use
;
5561 buffer_info
= &tx_ring
->buffer_info
[i
];
5562 context_desc
= E1000_CONTEXT_DESC(*tx_ring
, i
);
5564 context_desc
->lower_setup
.ip_config
= 0;
5565 context_desc
->upper_setup
.tcp_fields
.tucss
= css
;
5566 context_desc
->upper_setup
.tcp_fields
.tucso
= css
+ skb
->csum_offset
;
5567 context_desc
->upper_setup
.tcp_fields
.tucse
= 0;
5568 context_desc
->tcp_seg_setup
.data
= 0;
5569 context_desc
->cmd_and_length
= cpu_to_le32(cmd_len
);
5571 buffer_info
->time_stamp
= jiffies
;
5572 buffer_info
->next_to_watch
= i
;
5575 if (i
== tx_ring
->count
)
5577 tx_ring
->next_to_use
= i
;
5582 static int e1000_tx_map(struct e1000_ring
*tx_ring
, struct sk_buff
*skb
,
5583 unsigned int first
, unsigned int max_per_txd
,
5584 unsigned int nr_frags
)
5586 struct e1000_adapter
*adapter
= tx_ring
->adapter
;
5587 struct pci_dev
*pdev
= adapter
->pdev
;
5588 struct e1000_buffer
*buffer_info
;
5589 unsigned int len
= skb_headlen(skb
);
5590 unsigned int offset
= 0, size
, count
= 0, i
;
5591 unsigned int f
, bytecount
, segs
;
5593 i
= tx_ring
->next_to_use
;
5596 buffer_info
= &tx_ring
->buffer_info
[i
];
5597 size
= min(len
, max_per_txd
);
5599 buffer_info
->length
= size
;
5600 buffer_info
->time_stamp
= jiffies
;
5601 buffer_info
->next_to_watch
= i
;
5602 buffer_info
->dma
= dma_map_single(&pdev
->dev
,
5604 size
, DMA_TO_DEVICE
);
5605 buffer_info
->mapped_as_page
= false;
5606 if (dma_mapping_error(&pdev
->dev
, buffer_info
->dma
))
5615 if (i
== tx_ring
->count
)
5620 for (f
= 0; f
< nr_frags
; f
++) {
5621 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[f
];
5623 len
= skb_frag_size(frag
);
5628 if (i
== tx_ring
->count
)
5631 buffer_info
= &tx_ring
->buffer_info
[i
];
5632 size
= min(len
, max_per_txd
);
5634 buffer_info
->length
= size
;
5635 buffer_info
->time_stamp
= jiffies
;
5636 buffer_info
->next_to_watch
= i
;
5637 buffer_info
->dma
= skb_frag_dma_map(&pdev
->dev
, frag
,
5640 buffer_info
->mapped_as_page
= true;
5641 if (dma_mapping_error(&pdev
->dev
, buffer_info
->dma
))
5650 segs
= skb_shinfo(skb
)->gso_segs
? : 1;
5651 /* multiply data chunks by size of headers */
5652 bytecount
= ((segs
- 1) * skb_headlen(skb
)) + skb
->len
;
5654 tx_ring
->buffer_info
[i
].skb
= skb
;
5655 tx_ring
->buffer_info
[i
].segs
= segs
;
5656 tx_ring
->buffer_info
[i
].bytecount
= bytecount
;
5657 tx_ring
->buffer_info
[first
].next_to_watch
= i
;
5662 dev_err(&pdev
->dev
, "Tx DMA map failed\n");
5663 buffer_info
->dma
= 0;
5669 i
+= tx_ring
->count
;
5671 buffer_info
= &tx_ring
->buffer_info
[i
];
5672 e1000_put_txbuf(tx_ring
, buffer_info
, true);
5678 static void e1000_tx_queue(struct e1000_ring
*tx_ring
, int tx_flags
, int count
)
5680 struct e1000_adapter
*adapter
= tx_ring
->adapter
;
5681 struct e1000_tx_desc
*tx_desc
= NULL
;
5682 struct e1000_buffer
*buffer_info
;
5683 u32 txd_upper
= 0, txd_lower
= E1000_TXD_CMD_IFCS
;
5686 if (tx_flags
& E1000_TX_FLAGS_TSO
) {
5687 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
|
5689 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
5691 if (tx_flags
& E1000_TX_FLAGS_IPV4
)
5692 txd_upper
|= E1000_TXD_POPTS_IXSM
<< 8;
5695 if (tx_flags
& E1000_TX_FLAGS_CSUM
) {
5696 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
;
5697 txd_upper
|= E1000_TXD_POPTS_TXSM
<< 8;
5700 if (tx_flags
& E1000_TX_FLAGS_VLAN
) {
5701 txd_lower
|= E1000_TXD_CMD_VLE
;
5702 txd_upper
|= (tx_flags
& E1000_TX_FLAGS_VLAN_MASK
);
5705 if (unlikely(tx_flags
& E1000_TX_FLAGS_NO_FCS
))
5706 txd_lower
&= ~(E1000_TXD_CMD_IFCS
);
5708 if (unlikely(tx_flags
& E1000_TX_FLAGS_HWTSTAMP
)) {
5709 txd_lower
|= E1000_TXD_CMD_DEXT
| E1000_TXD_DTYP_D
;
5710 txd_upper
|= E1000_TXD_EXTCMD_TSTAMP
;
5713 i
= tx_ring
->next_to_use
;
5716 buffer_info
= &tx_ring
->buffer_info
[i
];
5717 tx_desc
= E1000_TX_DESC(*tx_ring
, i
);
5718 tx_desc
->buffer_addr
= cpu_to_le64(buffer_info
->dma
);
5719 tx_desc
->lower
.data
= cpu_to_le32(txd_lower
|
5720 buffer_info
->length
);
5721 tx_desc
->upper
.data
= cpu_to_le32(txd_upper
);
5724 if (i
== tx_ring
->count
)
5726 } while (--count
> 0);
5728 tx_desc
->lower
.data
|= cpu_to_le32(adapter
->txd_cmd
);
5730 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5731 if (unlikely(tx_flags
& E1000_TX_FLAGS_NO_FCS
))
5732 tx_desc
->lower
.data
&= ~(cpu_to_le32(E1000_TXD_CMD_IFCS
));
5734 /* Force memory writes to complete before letting h/w
5735 * know there are new descriptors to fetch. (Only
5736 * applicable for weak-ordered memory model archs,
5741 tx_ring
->next_to_use
= i
;
5744 #define MINIMUM_DHCP_PACKET_SIZE 282
5745 static int e1000_transfer_dhcp_info(struct e1000_adapter
*adapter
,
5746 struct sk_buff
*skb
)
5748 struct e1000_hw
*hw
= &adapter
->hw
;
5751 if (skb_vlan_tag_present(skb
) &&
5752 !((skb_vlan_tag_get(skb
) == adapter
->hw
.mng_cookie
.vlan_id
) &&
5753 (adapter
->hw
.mng_cookie
.status
&
5754 E1000_MNG_DHCP_COOKIE_STATUS_VLAN
)))
5757 if (skb
->len
<= MINIMUM_DHCP_PACKET_SIZE
)
5760 if (((struct ethhdr
*)skb
->data
)->h_proto
!= htons(ETH_P_IP
))
5764 const struct iphdr
*ip
= (struct iphdr
*)((u8
*)skb
->data
+ 14);
5767 if (ip
->protocol
!= IPPROTO_UDP
)
5770 udp
= (struct udphdr
*)((u8
*)ip
+ (ip
->ihl
<< 2));
5771 if (ntohs(udp
->dest
) != 67)
5774 offset
= (u8
*)udp
+ 8 - skb
->data
;
5775 length
= skb
->len
- offset
;
5776 return e1000e_mng_write_dhcp_info(hw
, (u8
*)udp
+ 8, length
);
	}

	return 0;
}

static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	netif_stop_queue(adapter->netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (e1000_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(adapter->netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
	BUG_ON(size > tx_ring->count);

	if (e1000_desc_unused(tx_ring) >= size)
		return 0;
	return __e1000_maybe_stop_tx(tx_ring, size);
}
5814 static netdev_tx_t
e1000_xmit_frame(struct sk_buff
*skb
,
5815 struct net_device
*netdev
)
5817 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
5818 struct e1000_ring
*tx_ring
= adapter
->tx_ring
;
5820 unsigned int tx_flags
= 0;
5821 unsigned int len
= skb_headlen(skb
);
5822 unsigned int nr_frags
;
5827 __be16 protocol
= vlan_get_protocol(skb
);
5829 if (test_bit(__E1000_DOWN
, &adapter
->state
)) {
5830 dev_kfree_skb_any(skb
);
5831 return NETDEV_TX_OK
;
5834 if (skb
->len
<= 0) {
5835 dev_kfree_skb_any(skb
);
5836 return NETDEV_TX_OK
;
5839 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5840 * pad skb in order to meet this minimum size requirement
5842 if (skb_put_padto(skb
, 17))
5843 return NETDEV_TX_OK
;
5845 mss
= skb_shinfo(skb
)->gso_size
;
5849 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5850 * points to just header, pull a few bytes of payload from
5851 * frags into skb->data
5853 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
5854 /* we do this workaround for ES2LAN, but it is un-necessary,
5855 * avoiding it could save a lot of cycles
5857 if (skb
->data_len
&& (hdr_len
== len
)) {
5858 unsigned int pull_size
;
5860 pull_size
= min_t(unsigned int, 4, skb
->data_len
);
5861 if (!__pskb_pull_tail(skb
, pull_size
)) {
5862 e_err("__pskb_pull_tail failed.\n");
5863 dev_kfree_skb_any(skb
);
5864 return NETDEV_TX_OK
;
5866 len
= skb_headlen(skb
);
5870 /* reserve a descriptor for the offload context */
5871 if ((mss
) || (skb
->ip_summed
== CHECKSUM_PARTIAL
))
5875 count
+= DIV_ROUND_UP(len
, adapter
->tx_fifo_limit
);
5877 nr_frags
= skb_shinfo(skb
)->nr_frags
;
5878 for (f
= 0; f
< nr_frags
; f
++)
5879 count
+= DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb
)->frags
[f
]),
5880 adapter
->tx_fifo_limit
);
5882 if (adapter
->hw
.mac
.tx_pkt_filtering
)
5883 e1000_transfer_dhcp_info(adapter
, skb
);
5885 /* need: count + 2 desc gap to keep tail from touching
5886 * head, otherwise try next time
5888 if (e1000_maybe_stop_tx(tx_ring
, count
+ 2))
5889 return NETDEV_TX_BUSY
;
5891 if (skb_vlan_tag_present(skb
)) {
5892 tx_flags
|= E1000_TX_FLAGS_VLAN
;
5893 tx_flags
|= (skb_vlan_tag_get(skb
) <<
5894 E1000_TX_FLAGS_VLAN_SHIFT
);
5897 first
= tx_ring
->next_to_use
;
5899 tso
= e1000_tso(tx_ring
, skb
, protocol
);
5901 dev_kfree_skb_any(skb
);
5902 return NETDEV_TX_OK
;
5906 tx_flags
|= E1000_TX_FLAGS_TSO
;
5907 else if (e1000_tx_csum(tx_ring
, skb
, protocol
))
5908 tx_flags
|= E1000_TX_FLAGS_CSUM
;
5910 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5911 * 82571 hardware supports TSO capabilities for IPv6 as well...
5912 * no longer assume, we must.
5914 if (protocol
== htons(ETH_P_IP
))
5915 tx_flags
|= E1000_TX_FLAGS_IPV4
;
5917 if (unlikely(skb
->no_fcs
))
5918 tx_flags
|= E1000_TX_FLAGS_NO_FCS
;
5920 /* if count is 0 then mapping error has occurred */
5921 count
= e1000_tx_map(tx_ring
, skb
, first
, adapter
->tx_fifo_limit
,
5924 if (unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) &&
5925 (adapter
->flags
& FLAG_HAS_HW_TIMESTAMP
)) {
5926 if (!adapter
->tx_hwtstamp_skb
) {
5927 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
5928 tx_flags
|= E1000_TX_FLAGS_HWTSTAMP
;
5929 adapter
->tx_hwtstamp_skb
= skb_get(skb
);
5930 adapter
->tx_hwtstamp_start
= jiffies
;
5931 schedule_work(&adapter
->tx_hwtstamp_work
);
5933 adapter
->tx_hwtstamp_skipped
++;
5937 skb_tx_timestamp(skb
);
5939 netdev_sent_queue(netdev
, skb
->len
);
5940 e1000_tx_queue(tx_ring
, tx_flags
, count
);
5941 /* Make sure there is space in the ring for the next send. */
5942 e1000_maybe_stop_tx(tx_ring
,
5944 DIV_ROUND_UP(PAGE_SIZE
,
5945 adapter
->tx_fifo_limit
) + 2));
5947 if (!netdev_xmit_more() ||
5948 netif_xmit_stopped(netdev_get_tx_queue(netdev
, 0))) {
5949 if (adapter
->flags2
& FLAG2_PCIM2PCI_ARBITER_WA
)
5950 e1000e_update_tdt_wa(tx_ring
,
5951 tx_ring
->next_to_use
);
5953 writel(tx_ring
->next_to_use
, tx_ring
->tail
);
5956 dev_kfree_skb_any(skb
);
5957 tx_ring
->buffer_info
[first
].time_stamp
= 0;
5958 tx_ring
->next_to_use
= first
;
5961 return NETDEV_TX_OK
;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: index of the hung queue (unused)
 **/
static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter;
	adapter = container_of(work, struct e1000_adapter, reset_task);

	rtnl_lock();
	/* don't run the task if already down */
	if (test_bit(__E1000_DOWN, &adapter->state)) {
		rtnl_unlock();
		return;
	}

	if (!(adapter->flags & FLAG_RESTART_NOW)) {
		e1000e_dump(adapter);
		e_err("Reset adapter unexpectedly\n");
	}
	e1000e_reinit_locked(adapter);
	rtnl_unlock();
}
5999 * e1000e_get_stats64 - Get System Network Statistics
6000 * @netdev: network interface device structure
6001 * @stats: rtnl_link_stats64 pointer
6003 * Returns the address of the device statistics structure.
6005 void e1000e_get_stats64(struct net_device
*netdev
,
6006 struct rtnl_link_stats64
*stats
)
6008 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
6010 spin_lock(&adapter
->stats64_lock
);
6011 e1000e_update_stats(adapter
);
6012 /* Fill out the OS statistics structure */
6013 stats
->rx_bytes
= adapter
->stats
.gorc
;
6014 stats
->rx_packets
= adapter
->stats
.gprc
;
6015 stats
->tx_bytes
= adapter
->stats
.gotc
;
6016 stats
->tx_packets
= adapter
->stats
.gptc
;
6017 stats
->multicast
= adapter
->stats
.mprc
;
6018 stats
->collisions
= adapter
->stats
.colc
;
6022 /* RLEC on some newer hardware can be incorrect so build
6023 * our own version based on RUC and ROC
6025 stats
->rx_errors
= adapter
->stats
.rxerrc
+
6026 adapter
->stats
.crcerrs
+ adapter
->stats
.algnerrc
+
6027 adapter
->stats
.ruc
+ adapter
->stats
.roc
+ adapter
->stats
.cexterr
;
6028 stats
->rx_length_errors
= adapter
->stats
.ruc
+ adapter
->stats
.roc
;
6029 stats
->rx_crc_errors
= adapter
->stats
.crcerrs
;
6030 stats
->rx_frame_errors
= adapter
->stats
.algnerrc
;
6031 stats
->rx_missed_errors
= adapter
->stats
.mpc
;
6034 stats
->tx_errors
= adapter
->stats
.ecol
+ adapter
->stats
.latecol
;
6035 stats
->tx_aborted_errors
= adapter
->stats
.ecol
;
6036 stats
->tx_window_errors
= adapter
->stats
.latecol
;
6037 stats
->tx_carrier_errors
= adapter
->stats
.tncrs
;
6039 /* Tx Dropped needs to be maintained elsewhere */
6041 spin_unlock(&adapter
->stats64_lock
);
6045 * e1000_change_mtu - Change the Maximum Transfer Unit
6046 * @netdev: network interface device structure
6047 * @new_mtu: new value for maximum frame size
6049 * Returns 0 on success, negative on failure
6051 static int e1000_change_mtu(struct net_device
*netdev
, int new_mtu
)
6053 struct e1000_adapter
*adapter
= netdev_priv(netdev
);
6054 int max_frame
= new_mtu
+ VLAN_ETH_HLEN
+ ETH_FCS_LEN
;
6056 /* Jumbo frame support */
6057 if ((new_mtu
> ETH_DATA_LEN
) &&
6058 !(adapter
->flags
& FLAG_HAS_JUMBO_FRAMES
)) {
6059 e_err("Jumbo Frames not supported.\n");
6063 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
6064 if ((adapter
->hw
.mac
.type
>= e1000_pch2lan
) &&
6065 !(adapter
->flags2
& FLAG2_CRC_STRIPPING
) &&
6066 (new_mtu
> ETH_DATA_LEN
)) {
6067 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
6071 while (test_and_set_bit(__E1000_RESETTING
, &adapter
->state
))
6072 usleep_range(1000, 1100);
6073 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
6074 adapter
->max_frame_size
= max_frame
;
6075 netdev_dbg(netdev
, "changing MTU from %d to %d\n",
6076 netdev
->mtu
, new_mtu
);
6077 netdev
->mtu
= new_mtu
;
6079 pm_runtime_get_sync(netdev
->dev
.parent
);
6081 if (netif_running(netdev
))
6082 e1000e_down(adapter
, true);
6084 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
6085 * means we reserve 2 more, this pushes us to allocate from the next
6087 * i.e. RXBUFFER_2048 --> size-4096 slab
6088 * However with the new *_jumbo_rx* routines, jumbo receives will use
6092 if (max_frame
<= 2048)
6093 adapter
->rx_buffer_len
= 2048;
6095 adapter
->rx_buffer_len
= 4096;
6097 /* adjust allocation if LPE protects us, and we aren't using SBP */
6098 if (max_frame
<= (VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN
))
6099 adapter
->rx_buffer_len
= VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN
;
6101 if (netif_running(netdev
))
6104 e1000e_reset(adapter
);
6106 pm_runtime_put_sync(netdev
->dev
.parent
);
6108 clear_bit(__E1000_RESETTING
, &adapter
->state
);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		e1000_phy_read_status(adapter);

		switch (data->reg_num & 0x1F) {
		case MII_BMCR:
			data->val_out = adapter->phy_regs.bmcr;
			break;
		case MII_BMSR:
			data->val_out = adapter->phy_regs.bmsr;
			break;
		case MII_PHYSID1:
			data->val_out = (adapter->hw.phy.id >> 16);
			break;
		case MII_PHYSID2:
			data->val_out = (adapter->hw.phy.id & 0xFFFF);
			break;
		case MII_ADVERTISE:
			data->val_out = adapter->phy_regs.advertise;
			break;
		case MII_LPA:
			data->val_out = adapter->phy_regs.lpa;
			break;
		case MII_EXPANSION:
			data->val_out = adapter->phy_regs.expansion;
			break;
		case MII_CTRL1000:
			data->val_out = adapter->phy_regs.ctrl1000;
			break;
		case MII_STAT1000:
			data->val_out = adapter->phy_regs.stat1000;
			break;
		case MII_ESTATUS:
			data->val_out = adapter->phy_regs.estatus;
			break;
		default:
			return -EIO;
		}
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * e1000e_hwtstamp_set - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: interface request
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware filters.
 * Not all combinations are supported, in particular event type has to be
 * specified. Matching the kind of event packet is not supported, with the
 * exception of "all V2 events regardless of level 2 or 4".
 **/
static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct hwtstamp_config config;
	int ret_val;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	ret_val = e1000e_config_hwtstamp(adapter, &config);
	if (ret_val)
		return ret_val;

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* With V2 type filters which specify a Sync or Delay Request,
		 * Path Delay Request/Response messages are also time stamped
		 * by hardware so notify the caller the requested packets plus
		 * some others are time stamped.
		 */
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	default:
		break;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
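/*
 * Illustrative note (not part of the driver): user space reaches this path
 * through the standard SIOCSHWTSTAMP ioctl.  A minimal sketch, assuming a
 * socket descriptor fd and a hypothetical interface name "eth0":
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter may have been widened (for example to
 * HWTSTAMP_FILTER_SOME above) to reflect what the hardware actually stamps.
 */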
static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
			    sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
}
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return e1000e_hwtstamp_set(netdev, ifr);
	case SIOCGHWTSTAMP:
		return e1000e_hwtstamp_get(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, mac_reg, wuc;
	u16 phy_reg, wuc_enable;
	int retval;

	/* copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	retval = hw->phy.ops.acquire(hw);
	if (retval) {
		e_err("Could not acquire PHY\n");
		return retval;
	}

	/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		goto release;

	/* copy MAC MTA to PHY MTA - only needed for pchlan */
	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
					   (u16)((mac_reg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
	mac_reg = er32(RCTL);
	if (mac_reg & E1000_RCTL_UPE)
		phy_reg |= BM_RCTL_UPE;
	if (mac_reg & E1000_RCTL_MPE)
		phy_reg |= BM_RCTL_MPE;
	phy_reg &= ~(BM_RCTL_MO_MASK);
	if (mac_reg & E1000_RCTL_MO_3)
		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
			    << BM_RCTL_MO_SHIFT);
	if (mac_reg & E1000_RCTL_BAM)
		phy_reg |= BM_RCTL_BAM;
	if (mac_reg & E1000_RCTL_PMCF)
		phy_reg |= BM_RCTL_PMCF;
	mac_reg = er32(CTRL);
	if (mac_reg & E1000_CTRL_RFCE)
		phy_reg |= BM_RCTL_RFCE;
	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);

	wuc = E1000_WUC_PME_EN;
	if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
		wuc |= E1000_WUC_APME;

	/* enable PHY wakeup in MAC register */
	ew32(WUFC, wufc);
	ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
		   E1000_WUC_PME_STATUS | wuc));

	/* configure and enable PHY wakeup in PHY registers */
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);

	/* activate PHY wakeup */
	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
	if (retval)
		e_err("Could not set PHY Host Wakeup bit\n");
release:
	hw->phy.ops.release(hw);

	return retval;
}
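/*
 * Illustrative note (not part of the original source): each 32-bit MTA
 * entry is mirrored into two 16-bit PHY wakeup registers above.  For
 * example, a table value of 0x8000100F is written as 0x100F to BM_MTA(i)
 * and 0x8000 to BM_MTA(i) + 1.
 */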
static void e1000e_flush_lpic(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ret_val;

	pm_runtime_get_sync(netdev->dev.parent);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto fl_out;

	pr_info("EEE TX LPI TIMER: %08X\n",
		er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);

	hw->phy.ops.release(hw);

fl_out:
	pm_runtime_put_sync(netdev->dev.parent);
}
/* S0ix implementation */
static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mac_data;
	u16 phy_data;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME configure the device for S0ix */
		mac_data = er32(H2ME);
		mac_data |= E1000_H2ME_START_DPG;
		mac_data &= ~E1000_H2ME_EXIT_DPG;
		ew32(H2ME, mac_data);
	} else {
		/* Request driver configure the device to S0ix */
		/* Disable the periodic inband message,
		 * don't request PCIe clock in K1 page770_17[10:9] = 10b
		 */
		e1e_rphy(hw, HV_PM_CTRL, &phy_data);
		phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
		phy_data |= BIT(10);
		e1e_wphy(hw, HV_PM_CTRL, phy_data);

		/* Make sure we don't exit K1 every time a new packet arrives
		 * 772_29[5] = 1 CS_Mode_Stay_In_K1
		 */
		e1e_rphy(hw, I217_CGFREG, &phy_data);
		phy_data |= BIT(5);
		e1e_wphy(hw, I217_CGFREG, phy_data);

		/* Change the MAC/PHY interface to SMBus
		 * Force the SMBus in PHY page769_23[0] = 1
		 * Force the SMBus in MAC CTRL_EXT[11] = 1
		 */
		e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
		phy_data |= CV_SMB_CTRL_FORCE_SMBUS;
		e1e_wphy(hw, CV_SMB_CTRL, phy_data);
		mac_data = er32(CTRL_EXT);
		mac_data |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_data);

		/* DFT control: PHY bit: page769_20[0] = 1
		 * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1
		 */
		e1e_rphy(hw, I82579_DFT_CTRL, &phy_data);
		phy_data |= BIT(0);
		e1e_wphy(hw, I82579_DFT_CTRL, phy_data);

		mac_data = er32(EXTCNF_CTRL);
		mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
		ew32(EXTCNF_CTRL, mac_data);

		/* Enable the Dynamic Power Gating in the MAC */
		mac_data = er32(FEXTNVM7);
		mac_data |= BIT(22);
		ew32(FEXTNVM7, mac_data);

		/* Disable disconnected cable conditioning for Power Gating */
		mac_data = er32(DPGFR);
		mac_data |= BIT(2);
		ew32(DPGFR, mac_data);

		/* Don't wake from dynamic Power Gating with clock request */
		mac_data = er32(FEXTNVM12);
		mac_data |= BIT(12);
		ew32(FEXTNVM12, mac_data);

		/* Ungate PGCB clock */
		mac_data = er32(FEXTNVM9);
		mac_data &= ~BIT(28);
		ew32(FEXTNVM9, mac_data);

		/* Enable K1 off to enable mPHY Power Gating */
		mac_data = er32(FEXTNVM6);
		mac_data |= BIT(31);
		ew32(FEXTNVM6, mac_data);

		/* Enable mPHY power gating for any link and speed */
		mac_data = er32(FEXTNVM8);
		mac_data |= BIT(9);
		ew32(FEXTNVM8, mac_data);

		/* Enable the Dynamic Clock Gating in the DMA and MAC */
		mac_data = er32(CTRL_EXT);
		mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
		ew32(CTRL_EXT, mac_data);

		/* No MAC DPG gating SLP_S0 in modern standby
		 * Switch the logic of the lanphypc to use PMC counter
		 */
		mac_data = er32(FEXTNVM5);
		mac_data |= BIT(7);
		ew32(FEXTNVM5, mac_data);
	}

	/* Disable the time synchronization clock */
	mac_data = er32(FEXTNVM7);
	mac_data |= BIT(31);
	mac_data &= ~BIT(0);
	ew32(FEXTNVM7, mac_data);

	/* Dynamic Power Gating Enable */
	mac_data = er32(CTRL_EXT);
	mac_data |= BIT(3);
	ew32(CTRL_EXT, mac_data);

	/* Check MAC Tx/Rx packet buffer pointers.
	 * Reset MAC Tx/Rx packet buffer pointers to suppress any
	 * pending traffic indication that would prevent power gating.
	 */
	mac_data = er32(TDFH);
	if (mac_data)
		ew32(TDFH, 0);
	mac_data = er32(TDFT);
	if (mac_data)
		ew32(TDFT, 0);
	mac_data = er32(TDFHS);
	if (mac_data)
		ew32(TDFHS, 0);
	mac_data = er32(TDFTS);
	if (mac_data)
		ew32(TDFTS, 0);
	mac_data = er32(TDFPC);
	if (mac_data)
		ew32(TDFPC, 0);
	mac_data = er32(RDFH);
	if (mac_data)
		ew32(RDFH, 0);
	mac_data = er32(RDFT);
	if (mac_data)
		ew32(RDFT, 0);
	mac_data = er32(RDFHS);
	if (mac_data)
		ew32(RDFHS, 0);
	mac_data = er32(RDFTS);
	if (mac_data)
		ew32(RDFTS, 0);
	mac_data = er32(RDFPC);
	if (mac_data)
		ew32(RDFPC, 0);
}
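/*
 * Illustrative note (not part of the original source): the exit flow below
 * undoes the entry writes with the complementary operation on the same
 * registers.  For example, the FEXTNVM7 dynamic power gating bit set above
 * with BIT(22) is cleared on exit by masking with 0xFFBFFFFF (~BIT(22)),
 * and the FEXTNVM8/FEXTNVM6 bits are cleared with the matching ~BIT(9) and
 * ~BIT(31) writes.
 */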
static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool firmware_bug = false;
	u32 mac_data;
	u16 phy_data;
	u32 i = 0;

	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
		/* Request ME unconfigure the device from S0ix */
		mac_data = er32(H2ME);
		mac_data &= ~E1000_H2ME_START_DPG;
		mac_data |= E1000_H2ME_EXIT_DPG;
		ew32(H2ME, mac_data);

		/* Poll up to 2.5 seconds for ME to unconfigure DPG.
		 * If this takes more than 1 second, show a warning indicating a
		 * firmware bug
		 */
		while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) {
			if (i > 100 && !firmware_bug)
				firmware_bug = true;

			if (i++ == 250) {
				e_dbg("Timeout (firmware bug): %d msec\n",
				      i * 10);
				break;
			}

			usleep_range(10000, 11000);
		}
		if (firmware_bug)
			e_warn("DPG_EXIT_DONE took %d msec. This is a firmware bug\n",
			       i * 10);
		else
			e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10);
	} else {
		/* Request driver unconfigure the device from S0ix */

		/* Disable the Dynamic Power Gating in the MAC */
		mac_data = er32(FEXTNVM7);
		mac_data &= 0xFFBFFFFF;
		ew32(FEXTNVM7, mac_data);

		/* Disable mPHY power gating for any link and speed */
		mac_data = er32(FEXTNVM8);
		mac_data &= ~BIT(9);
		ew32(FEXTNVM8, mac_data);

		/* Disable K1 off */
		mac_data = er32(FEXTNVM6);
		mac_data &= ~BIT(31);
		ew32(FEXTNVM6, mac_data);

		/* Disable Ungate PGCB clock */
		mac_data = er32(FEXTNVM9);
		mac_data |= BIT(28);
		ew32(FEXTNVM9, mac_data);

		/* Cancel not waking from dynamic
		 * Power Gating with clock request
		 */
		mac_data = er32(FEXTNVM12);
		mac_data &= ~BIT(12);
		ew32(FEXTNVM12, mac_data);

		/* Cancel disable disconnected cable conditioning
		 * for Power Gating
		 */
		mac_data = er32(DPGFR);
		mac_data &= ~BIT(2);
		ew32(DPGFR, mac_data);

		/* Disable the Dynamic Clock Gating in the DMA and MAC */
		mac_data = er32(CTRL_EXT);
		mac_data &= 0xFFF7FFFF;
		ew32(CTRL_EXT, mac_data);

		/* Revert the lanphypc logic to use the internal Gbe counter
		 * and not the PMC counter
		 */
		mac_data = er32(FEXTNVM5);
		mac_data &= 0xFFFFFF7F;
		ew32(FEXTNVM5, mac_data);

		/* Enable the periodic inband message,
		 * Request PCIe clock in K1 page770_17[10:9] =01b
		 */
		e1e_rphy(hw, HV_PM_CTRL, &phy_data);
		phy_data &= ~HV_PM_CTRL_K1_CLK_REQ;
		phy_data |= HV_PM_CTRL_K1_CLK_REQ;
		e1e_wphy(hw, HV_PM_CTRL, phy_data);

		/* Return back configuration
		 * 772_29[5] = 0 CS_Mode_Stay_In_K1
		 */
		e1e_rphy(hw, I217_CGFREG, &phy_data);
		phy_data &= ~BIT(5);
		e1e_wphy(hw, I217_CGFREG, phy_data);

		/* Change the MAC/PHY interface to Kumeran
		 * Unforce the SMBus in PHY page769_23[0] = 0
		 * Unforce the SMBus in MAC CTRL_EXT[11] = 0
		 */
		e1e_rphy(hw, CV_SMB_CTRL, &phy_data);
		phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS;
		e1e_wphy(hw, CV_SMB_CTRL, phy_data);
		mac_data = er32(CTRL_EXT);
		mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_data);
	}

	/* Disable Dynamic Power Gating */
	mac_data = er32(CTRL_EXT);
	mac_data &= 0xFFFFFFF7;
	ew32(CTRL_EXT, mac_data);

	/* Enable the time synchronization clock */
	mac_data = er32(FEXTNVM7);
	mac_data &= ~BIT(31);
	mac_data |= BIT(0);
	ew32(FEXTNVM7, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	bool present;

	rtnl_lock();

	present = netif_device_present(netdev);
	netif_device_detach(netdev);

	if (present && netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 11000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

		/* Quiesce the device without resetting the hardware */
		e1000e_down(adapter, false);
		e1000_free_irq(adapter);
	}
	rtnl_unlock();

	e1000e_reset_interrupt_capability(adapter);

	/* Allow time for pending master requests to run */
	e1000e_disable_pcie_master(&adapter->hw);

	return 0;
}
static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status, wufc;
	int retval = 0;

	/* Runtime suspend should only enable wakeup for link changes */
	if (runtime)
		wufc = E1000_WUFC_LNKC;
	else if (device_may_wakeup(&pdev->dev))
		wufc = adapter->wol;
	else
		wufc = 0;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000e_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_MPE;
			ew32(RCTL, rctl);
		}

		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_ADVD3WUC;
		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
		ew32(CTRL, ctrl);

		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		if (!runtime)
			e1000e_power_up_phy(adapter);

		if (adapter->flags & FLAG_IS_ICH)
			e1000_suspend_workarounds_ich8lan(&adapter->hw);

		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
			/* enable wakeup by the PHY */
			retval = e1000_init_phy_wakeup(adapter, wufc);
			if (retval)
				return retval;
		} else {
			/* enable wakeup by the MAC */
			ew32(WUFC, wufc);
			ew32(WUC, E1000_WUC_PME_EN);
		}
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);

		e1000_power_down_phy(adapter);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3) {
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
	} else if (hw->mac.type >= e1000_pch_lpt) {
		if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
			/* ULP does not support wake from unicast, multicast
			 * or broadcast.
			 */
			retval = e1000_enable_ulp_lpt_lp(hw, !runtime);

		if (retval)
			return retval;
	}

	/* Ensure that the appropriate bits are set in LPI_CTRL
	 * for EEE in Sx
	 */
	if ((hw->phy.type >= e1000_phy_i217) &&
	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
		u16 lpi_ctrl = 0;

		retval = hw->phy.ops.acquire(hw);
		if (!retval) {
			retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
						 &lpi_ctrl);
			if (!retval) {
				if (adapter->eee_advert &
				    hw->dev_spec.ich8lan.eee_lp_ability &
				    I82579_EEE_100_SUPPORTED)
					lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
				if (adapter->eee_advert &
				    hw->dev_spec.ich8lan.eee_lp_ability &
				    I82579_EEE_1000_SUPPORTED)
					lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

				retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
							 lpi_ctrl);
			}
		}
		hw->phy.ops.release(hw);
	}

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	pci_clear_master(pdev);

	/* The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3.  To
	 * prevent this we need to mask off the correctable errors on the
	 * downstream port of the pci-e switch.
	 *
	 * We don't have the associated upstream bridge while assigning
	 * the PCI device into guest. For example, the KVM on power is
	 * one of the cases.
	 */
	if (adapter->flags & FLAG_IS_QUAD_PORT) {
		struct pci_dev *us_dev = pdev->bus->self;
		u16 devctl;

		if (!us_dev)
			return 0;

		pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
					   (devctl & ~PCI_EXP_DEVCTL_CERE));

		pci_save_state(pdev);
		pci_prepare_to_sleep(pdev);

		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
	}

	return 0;
}
/**
 * __e1000e_disable_aspm - Disable ASPM states
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 * @locked: indication if this context holds pci_bus_sem locked.
 *
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 **/
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
{
	struct pci_dev *parent = pdev->bus->self;
	u16 aspm_dis_mask = 0;
	u16 pdev_aspmc, parent_aspmc;

	switch (state) {
	case PCIE_LINK_STATE_L0S:
	case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
		fallthrough; /* can't have L1 without L0s */
	case PCIE_LINK_STATE_L1:
		aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
		break;
	default:
		return;
	}

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;

	if (parent) {
		pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
					  &parent_aspmc);
		parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
	}

	/* Nothing to do if the ASPM states to be disabled already are */
	if (!(pdev_aspmc & aspm_dis_mask) &&
	    (!parent || !(parent_aspmc & aspm_dis_mask)))
		return;

	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
		 "L0s" : "",
		 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
		 "L1" : "");

#ifdef CONFIG_PCIEASPM
	if (locked)
		pci_disable_link_state_locked(pdev, state);
	else
		pci_disable_link_state(pdev, state);

	/* Double-check ASPM control.  If not disabled by the above, the
	 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
	 * not enabled); override by writing PCI config space directly.
	 */
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
	pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;

	if (!(aspm_dis_mask & pdev_aspmc))
		return;
#endif /* CONFIG_PCIEASPM */

	/* Both device and parent should have the same ASPM setting.
	 * Disable ASPM in downstream component first and then upstream.
	 */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);

	if (parent)
		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
					   aspm_dis_mask);
}
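/*
 * Illustrative note (not part of the original source): the @state argument
 * uses the generic PCIE_LINK_STATE_* values while the config-space override
 * uses the Link Control ASPM field, so a request of
 * PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 becomes
 * aspm_dis_mask = PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1 and is
 * cleared on both the device and, when present, its upstream bridge.
 */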
/**
 * e1000e_disable_aspm - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * This function acquires the pci_bus_sem!
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 **/
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 0);
}
/**
 * e1000e_disable_aspm_locked - Disable ASPM states.
 * @pdev: pointer to PCI device struct
 * @state: bit-mask of ASPM states to disable
 *
 * This function must be called with pci_bus_sem acquired!
 * Some devices *must* have certain ASPM states disabled per hardware errata.
 **/
static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
{
	__e1000e_disable_aspm(pdev, state, 1);
}
static int e1000e_pm_thaw(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	e1000e_set_interrupt_capability(adapter);

	rtnl_lock();
	if (netif_running(netdev)) {
		rc = e1000_request_irq(adapter);
		if (rc)
			goto err_irq;

		e1000e_up(adapter);
	}

	netif_device_attach(netdev);
err_irq:
	rtnl_unlock();

	return rc;
}
static int __e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_master(pdev);

	if (hw->mac.type >= e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	e1000e_power_up_phy(adapter);

	/* report the system wakeup cause from S3/S4 */
	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
		u16 phy_data;

		e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
		if (phy_data) {
			e_info("PHY Wakeup cause - %s\n",
			       phy_data & E1000_WUS_EX ? "Unicast Packet" :
			       phy_data & E1000_WUS_MC ? "Multicast Packet" :
			       phy_data & E1000_WUS_BC ? "Broadcast Packet" :
			       phy_data & E1000_WUS_MAG ? "Magic Packet" :
			       phy_data & E1000_WUS_LNKC ?
			       "Link Status Change" : "other");
		}
		e1e_wphy(&adapter->hw, BM_WUS, ~0);
	} else {
		u32 wus = er32(WUS);

		if (wus) {
			e_info("MAC Wakeup cause - %s\n",
			       wus & E1000_WUS_EX ? "Unicast Packet" :
			       wus & E1000_WUS_MC ? "Multicast Packet" :
			       wus & E1000_WUS_BC ? "Broadcast Packet" :
			       wus & E1000_WUS_MAG ? "Magic Packet" :
			       wus & E1000_WUS_LNKC ? "Link Status Change" :
			       "other");
		}
		ew32(WUS, ~0);
	}

	e1000e_reset(adapter);

	e1000_init_manageability_pt(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	return 0;
}
static __maybe_unused int e1000e_pm_prepare(struct device *dev)
{
	return pm_runtime_suspended(dev) &&
		pm_suspend_via_firmware();
}
static __maybe_unused int e1000e_pm_suspend(struct device *dev)
{
	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = to_pci_dev(dev);
	int rc;

	e1000e_flush_lpic(pdev);

	e1000e_pm_freeze(dev);

	rc = __e1000_shutdown(pdev, false);
	if (rc) {
		e1000e_pm_thaw(dev);
	} else {
		/* Introduce S0ix implementation */
		if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
			e1000e_s0ix_entry_flow(adapter);
	}

	return rc;
}
static __maybe_unused int e1000e_pm_resume(struct device *dev)
{
	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = to_pci_dev(dev);
	int rc;

	/* Introduce S0ix implementation */
	if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
		e1000e_s0ix_exit_flow(adapter);

	rc = __e1000_resume(pdev);
	if (rc)
		return rc;

	return e1000e_pm_thaw(dev);
}
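/*
 * Illustrative note (not part of the original source): the suspend/resume
 * pair above is intentionally symmetric.  e1000e_pm_suspend runs
 * freeze -> __e1000_shutdown -> S0ix entry, while e1000e_pm_resume runs
 * S0ix exit -> __e1000_resume -> thaw, so the device leaves the dynamic
 * power gating state before the MAC is reset and re-initialized.
 */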
static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 eee_lp;

	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;

	if (!e1000e_has_link(adapter)) {
		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
	}

	return -EBUSY;
}
static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = __e1000_resume(pdev);
	if (rc)
		return rc;

	if (netdev->flags & IFF_UP)
		e1000e_up(adapter);

	return rc;
}
static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_UP) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 11000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

		/* Down the device without resetting the hardware */
		e1000e_down(adapter, false);
	}

	if (__e1000_shutdown(pdev, true)) {
		e1000e_pm_runtime_resume(dev);
		return -EBUSY;
	}

	return 0;
}
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000e_flush_lpic(pdev);

	e1000e_pm_freeze(&pdev->dev);

	__e1000_shutdown(pdev, false);
}
#ifdef CONFIG_NET_POLL_CONTROLLER

static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->msix_entries) {
		int vector, msix_irq;

		vector = 0;
		msix_irq = adapter->msix_entries[vector].vector;
		if (disable_hardirq(msix_irq))
			e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		if (disable_hardirq(msix_irq))
			e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		if (disable_hardirq(msix_irq))
			e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_netpoll
 * @netdev: network interface device structure
 *
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		if (disable_hardirq(adapter->pdev->irq))
			e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	e1000e_pm_freeze(&pdev->dev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000e_pm_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;

	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pdev->state_saved = true;
		pci_restore_state(pdev);
		pci_set_master(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(STATUS, E1000_STATUS_LAN_INIT_DONE);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the e1000e_pm_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability_pt(adapter);

	e1000e_pm_thaw(&pdev->dev);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & BIT(0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
	if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_RXFCS;

	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
			 NETIF_F_RXALL)))
		return 0;

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* We need to take it back to defaults, which might mean
			 * stripping is still disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 1;
}
static const struct net_device_ops e1000e_netdev_ops = {
	.ndo_open		= e1000e_open,
	.ndo_stop		= e1000e_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats64	= e1000e_get_stats64,
	.ndo_set_rx_mode	= e1000e_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_eth_ioctl		= e1000_ioctl,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,

	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_set_features	= e1000_set_features,
	.ndo_fix_features	= e1000_fix_features,
	.ndo_features_check	= passthru_features_check,
};
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;
	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
	resource_size_t mmio_start, mmio_len;
	resource_size_t flash_start, flash_len;
	static int cards_found;
	u16 aspm_disable_flag = 0;
	int bars, i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	s32 ret_val = 0;

	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
		aspm_disable_flag = PCIE_LINK_STATE_L0S;
	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
		aspm_disable_flag |= PCIE_LINK_STATE_L1;
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions_exclusive(pdev, bars,
						     e1000e_driver_name);
	if (err)
		goto err_pci_reg;

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	/* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	netdev->irq = pdev->irq;

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
	adapter->pba = ei->pba;
	adapter->flags = ei->flags;
	adapter->flags2 = ei->flags2;
	adapter->hw.adapter = adapter;
	adapter->hw.mac.type = ei->mac;
	adapter->max_hw_frame_size = ei->max_hw_frame_size;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;

	if ((adapter->flags & FLAG_HAS_FLASH) &&
	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) &&
	    (hw->mac.type < e1000_pch_spt)) {
		flash_start = pci_resource_start(pdev, 1);
		flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}

	/* Set default EEE advertisement */
	if (adapter->flags2 & FLAG2_HAS_EEE)
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	/* construct the net_device struct */
	netdev->netdev_ops = &e1000e_netdev_ops;
	e1000e_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found++;

	e1000e_check_options(adapter);

	/* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;

	if ((adapter->flags & FLAG_IS_ICH) &&
	    (adapter->flags & FLAG_READ_ONLY_NVM) &&
	    (hw->mac.type < e1000_pch_spt))
		e1000e_write_protect_nvm_ich8lan(&adapter->hw);

	hw->mac.ops.get_bus_info(&adapter->hw);

	adapter->hw.phy.autoneg_wait_to_complete = 0;

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}

	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/* Set initial default active device features */
	netdev->features = (NETIF_F_SG |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);

	/* Set user-changeable features (subset of all device features) */
	netdev->hw_features = netdev->features;
	netdev->hw_features |= NETIF_F_RXFCS;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	netdev->hw_features |= NETIF_F_RXALL;

	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	/* MTU range: 68 - max_hw_frame_size */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = adapter->max_hw_frame_size -
			  (VLAN_ETH_HLEN + ETH_FCS_LEN);

	if (e1000e_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= FLAG_MNG_PT_ENABLED;

	/* before reading the NVM, reset the controller to
	 * put the device in a known good starting state
	 */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);

	/* systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	e1000_eeprom_checks(adapter);

	/* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");

	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}

	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, e1000_reset_task);
	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);

	/* Initialize link parameters. User can change them with ethtool */
	adapter->hw.mac.autoneg = 1;
	adapter->fc_autoneg = true;
	adapter->hw.fc.requested_mode = e1000_fc_default;
	adapter->hw.fc.current_mode = e1000_fc_default;
	adapter->hw.phy.autoneg_advertised = 0x2f;

	/* Initial Wake on LAN setting - If APM wake is enabled in
	 * the EEPROM, enable the ACPI Magic Packet filter
	 */
	if (adapter->flags & FLAG_APME_IN_WUC) {
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data = er32(WUC);
		eeprom_apme_mask = E1000_WUC_APME;
		if ((hw->mac.type > e1000_ich10lan) &&
		    (eeprom_data & E1000_WUC_PHY_WAKE))
			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
		    (adapter->hw.bus.func == 1))
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_B,
						 1, &eeprom_data);
		else
			ret_val = e1000_read_nvm(&adapter->hw,
						 NVM_INIT_CONTROL3_PORT_A,
						 1, &eeprom_data);
	}

	/* fetch WoL from EEPROM */
	if (ret_val)
		e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
	else if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	if (!(adapter->flags & FLAG_HAS_WOL))
		adapter->eeprom_wol = 0;

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

	if (ret_val) {
		e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
		adapter->eeprom_vers = 0;
	}

	/* init PTP hardware clock */
	e1000e_ptp_init(adapter);

	/* reset the hardware with the new settings */
	e1000e_reset(adapter);

	/* If the controller has AMT, do not set DRV_LOAD until the interface
	 * is up.  For all other cases, let the f/w know that the h/w is now
	 * under the control of the driver.
	 */
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);

	if (hw->mac.type >= e1000_pch_cnp)
		adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;

	strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e1000_print_device_info(adapter);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);

	if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp)
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000e_ptp_remove(adapter);

	/* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	set_bit(__E1000_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->downshift_task);
	cancel_work_sync(&adapter->update_phy_task);
	cancel_work_sync(&adapter->print_hang_task);

	if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
		cancel_work_sync(&adapter->tx_hwtstamp_work);
		if (adapter->tx_hwtstamp_skb) {
			dev_consume_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}

	unregister_netdev(netdev);

	if (pci_dev_run_wake(pdev))
		pm_runtime_get_noresume(&pdev->dev);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	e1000e_release_hw_control(adapter);

	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if ((adapter->hw.flash_address) &&
	    (adapter->hw.mac.type < e1000_pch_spt))
		iounmap(adapter->hw.flash_address);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static const struct pci_device_id e1000_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM6), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
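/*
 * Illustrative note (not part of the original source): MODULE_DEVICE_TABLE()
 * exports the table above as modalias information, so udev/modprobe can
 * autoload e1000e when a PCI device with a matching vendor/device ID
 * (vendor 8086 plus one of the device IDs listed) is discovered.
 */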
static const struct dev_pm_ops e1000_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.prepare	= e1000e_pm_prepare,
	.suspend	= e1000e_pm_suspend,
	.resume		= e1000e_pm_resume,
	.freeze		= e1000e_pm_freeze,
	.thaw		= e1000e_pm_thaw,
	.poweroff	= e1000e_pm_suspend,
	.restore	= e1000e_pm_resume,
#endif
	SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
			   e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
	.driver   = {
		.pm = &e1000_pm_ops,
	},
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	pr_info("Intel(R) PRO/1000 Network Driver\n");
	pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");

	return pci_register_driver(&e1000_driver);
}
module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");