/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}
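/* Example: a recognised tunnel type maps to its short name ("vxlan" or
 * "geneve"); any other value is rendered numerically as "type <n>".
 */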
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/* How often and how many times to poll for a reset while waiting for a
 * BIST that another function started to complete.
 */
#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100
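/* Worst case this wait polls BIST_WAIT_DELAY_COUNT times at
 * BIST_WAIT_DELAY_MS intervals, i.e. 100 * 100 ms = 10 seconds before
 * giving up on another function's BIST.
 */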
/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor.
 * On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for power systems with EEH support, this will give EEH a
 * chance to start.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;
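/* Working through the arithmetic above: the queue wakes half way back
 * to empty, i.e. after 1024 / 2 = 512 descriptors drain; at a worst
 * case of 3 descriptors per packet and ~1.2 us per full-sized frame on
 * a 10G link, draining takes 512 / 3 * 1.2 = 205 us, so a 150 us
 * moderation period still restarts the queue before the link idles.
 */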
/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
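/* With the standard NETIF_MSG_* bit values, the default mask above
 * works out to 0x20f7, so e.g. "modprobe sfc debug=0x20f7" reproduces
 * it explicitly (value shown for illustration; the named flags are
 * authoritative).
 */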
/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}
/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
				tx_queue->pkts_compl, tx_queue->bytes_compl);
		}
	}

	return spent;
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}
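/* Adaptive moderation in brief: event processing accumulates
 * channel->irq_mod_score, and after every 1000 interrupts on an
 * adaptive RX channel efx_update_irq_mod() compares the score against
 * irq_adapt_low_thresh/irq_adapt_high_thresh and steps the moderation
 * period down or up by efx->irq_mod_step_us.
 */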
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}
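/* Sizing example, assuming the default 1024-entry RX and TX rings:
 * 1024 + 1024 + 128 = 2176 rounds up to 4096 events, so eventq_mask
 * becomes 4095.
 */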
/* Prepare channel's event queue */
static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}
/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}
static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}
/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}
static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}
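/* Example: with shared channels the per-vector names are "<name>-0",
 * "<name>-1", ...; with efx_separate_tx_channels they become
 * "<name>-rx-<n>" and "<name>-tx-<n>", each numbered from its own
 * offset.
 */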
static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}
/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	/* Restore previously fixed features in hw_features and remove
	 * features which are fixed now
	 */
	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	/* RX filters may also have scatter-enabled flags */
	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	/* We must keep at least one descriptor in a TX ring empty.
	 * We could avoid this when the queue size does not exactly
	 * match the hardware ring size, but it's not that important.
	 * Therefore we stop the queue when one more skb might fill
	 * the ring completely. We wake it when half way back to
	 * empty.
	 */
	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}
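/* RX buffer layout in practice: with a standard 1500-byte MTU the
 * buffer fits within one page, so scatter is only enabled if the NIC
 * type always requires it; with a jumbo MTU the DMA length is capped
 * at EFX_RX_USR_BUF_SIZE and hardware scatter reassembles the frame,
 * falling back to high-order pages only where scatter is unsupported.
 */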
static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}
static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}
int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
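/* efx_realloc_channels() is the backend for resizing the RX/TX rings
 * (e.g. from the ethtool set_ringparam path): traffic is quiesced,
 * resizable channels are cloned with the new ring sizes, and on any
 * probe failure the old channels are swapped back in.
 */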
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.keep_eventq		= false,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}
/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}
void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
						   ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising ^= ADVERTISED_Asym_Pause;
	}
}
static void efx_fini_port(struct efx_nic *efx);

/* We assume that efx->type->reconfigure_mac will always try to sync RX
 * filters and therefore needs to read-lock the filter table against freeing
 */
void efx_mac_reconfigure(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}
/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
 * the MAC appropriately. All other PHY configuration changes are pushed
 * through phy_op->set_settings(), and pushed asynchronously to the MAC
 * through efx_monitor().
 *
 * Callers must hold the mac_lock
 */
int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	/* Disable PHY transmit in mac level loopbacks */
	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

/* Reinitialise the MAC to pick up new PHY settings, even if the port is
 * disabled.
 */
int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}
/* Asynchronous work item for changing MAC promiscuity and multicast
 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
 * MAC directly.
 */
static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	/* Connect up MAC/PHY operations table */
	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	/* Initialise MAC address to permanent address */
	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}
static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	/* Reconfigure the MAC before creating dma queues (required for
	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
	efx_mac_reconfigure(efx);

	/* Ensure the PHY advertises the correct flow control settings */
	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	/* Ensure MAC ingress/egress is enabled */
	efx_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}
/* Cancel work for MAC reconfiguration, periodic hardware monitoring
 * and the async self-test, wait for them to finish and prevent them
 * being scheduled again. This doesn't cover online resets, which
 * should only be cancelled when removing the device.
 */
static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	/* Serialise against efx_set_multicast_list() */
	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}
/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
		left->vpd_sn && right->vpd_sn &&
		!strcmp(left->vpd_sn, right->vpd_sn);
}
static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		/* Adding primary function; look for secondaries */

		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		/* Adding secondary function; look for primary */

		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}
/* This configures the PCI device to enable I/O and DMA. */
static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar;

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	/* Set the PCI DMA mask.  Try all possibilities from our
	 * genuine mask down to 32 bits, because some architectures
	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
	 */
	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

 fail4:
	pci_release_region(efx->pci_dev, bar);
 fail3:
	efx->membase_phys = 0;
 fail2:
	pci_disable_device(efx->pci_dev);
 fail1:
	return rc;
}
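/* DMA mask walk in practice: typical sfc NICs advertise a 46-bit
 * max_dma_mask, so the loop tries 46 bits, then 45, and so on until
 * dma_set_mask_and_coherent() accepts one, never trying fewer than
 * 32 bits.  (The 46-bit figure is illustrative of EF10/Siena parts.)
 */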
static void efx_fini_io(struct efx_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar;
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	/* Don't disable bus-mastering if VFs are assigned */
	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
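/* Example: with rss_cpus=0 on an 8-core/16-thread system, each
 * physical core is counted once via topology_sibling_cpumask(), giving
 * 8 RSS channels (subject to the EFX_MAX_RX_QUEUES and VF clamps
 * above).
 */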
/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (efx_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (efx_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible */
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
		}
	}

	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((efx->n_rx_channels > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   efx->n_rx_channels : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = efx->n_rx_channels;

	return 0;
}
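/* Interrupt-mode fallback order: MSI-X is tried first, then a single
 * MSI vector, then the legacy line interrupt, but only as far as the
 * NIC type's min_interrupt_mode allows.
 */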
static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}
static int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}
static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}
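/* Worked example for efx_set_channels() below: with
 * efx_separate_tx_channels, 8 channels and 4 TX channels,
 * tx_channel_offset is 4; channels 0-3 expose the RX queues while
 * channels 4-7 have their TX queue numbers rebased to start at 0.
 */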
static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	/* Carry out hardware-type specific initialisation */
	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		/* Determine the number of channels and queues by trying
		 * to hook in MSI-X interrupts.
		 */
		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail1;

		efx_set_channels(efx);

		/* dimension_resources can fail with EAGAIN */
		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			/* try again with new max_channels */
			efx_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(&efx->rx_hash_key,
				    sizeof(efx->rx_hash_key));
	efx_set_default_rx_indir_table(efx);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	/* Initialise the interrupt moderation settings */
	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}
static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	spin_lock_init(&efx->filter_lock);
	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

static void efx_restore_filters(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->filter_table_restore(efx);
	up_read(&efx->filter_sem);
}
/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_probe(efx);
	if (rc) /* not fatal; the PF will still work fine */
		netif_warn(efx, probe, efx->net_dev,
			   "failed to setup vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

 fail5:
	efx_remove_filters(efx);
 fail4:
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
 fail3:
	efx_remove_port(efx);
 fail2:
	efx_remove_nic(efx);
 fail1:
	return rc;
}
/* If the interface is supposed to be running but is not, start
 * the hardware and software data path, regular activity for the port
 * (MAC statistics, link polling, etc.) and schedule the port to be
 * reconfigured. Interrupts must already be enabled. This function
 * is safe to call multiple times, so long as the NIC is not disabled.
 * Requires the RTNL lock.
 */
static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	/* Check that it is appropriate to restart the interface. All
	 * of these flags are safe to read under just the rtnl lock */
	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	/* Start the hardware monitor if there is one */
	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	/* Link state detection is normally event-driven; we have
	 * to poll now because we could have missed a change
	 */
	mutex_lock(&efx->mac_lock);
	if (efx->phy_op->poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}
/* Quiesce the hardware and software data path, and regular activity
 * for the port without bringing the link down. Safe to call multiple
 * times with the NIC in almost any state, but interrupts should be
 * enabled. Requires the RTNL lock.
 */
static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	/* port_enabled can be read safely under the rtnl lock */
	if (!efx->port_enabled)
		return;

	/* update stats before we go down so we can accurately count
	 * rx_nodesc_drops
	 */
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	/* Stop the kernel transmit interface. This is only valid if
	 * the device is stopped or detached; otherwise the watchdog
	 * may fire immediately.
	 */
	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
	efx_remove_port(efx);
	efx_remove_nic(efx);
}
/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < efx->timer_quantum_ns)
		return 1; /* never round down to 0 */
	return usecs * 1000 / efx->timer_quantum_ns;
}

unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
{
	/* We must round up when converting ticks to microseconds
	 * because we round down when converting the other way.
	 */
	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
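/* Worked example, assuming an (illustrative) timer quantum of 6144 ns:
 * 60 us -> 60000 / 6144 = 9 ticks (rounded down), and converting back,
 * 9 ticks -> DIV_ROUND_UP(9 * 6144, 1000) = 56 us (rounded up).
 */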
/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int timer_max_us;

	EFX_ASSERT_RESET_SERIALISED(efx);

	timer_max_us = efx->timer_max_ns / 1000;

	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
		return -EINVAL;

	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation_us = rx_usecs;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation_us = rx_usecs;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation_us = tx_usecs;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = efx->irq_rx_moderation_us;

	/* If channels are shared between RX and TX, so is IRQ
	 * moderation. Otherwise, IRQ moderation is the same for all
	 * TX channels and is not adaptive.
	 */
	if (efx->tx_channel_offset == 0) {
		*tx_usecs = *rx_usecs;
	} else {
		struct efx_channel *tx_channel;

		tx_channel = efx->channel[efx->tx_channel_offset];
		*tx_usecs = tx_channel->irq_moderation_us;
	}
}
/**************************************************************************
 *
 * Hardware monitor
 *
 **************************************************************************/

/* Run periodically off the general workqueue */
static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	/* If the mac_lock is already held then it is likely a port
	 * reconfiguration is already in place, which will likely do
	 * most of the work of monitor() anyway. */
	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}
/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_set_ts_config(efx, ifr);
	if (cmd == SIOCGHWTSTAMP)
		return efx_ptp_get_ts_config(efx, ifr);

	/* Convert phy_id from older PRTAD/DEVAD format */
	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}
/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

#endif
/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
                return -EIO;

        /* Notify the kernel of the link state polled during driver load,
         * before the monitor starts running */
        efx_link_status_changed(efx);

        efx_start_all(efx);
        if (efx->state == STATE_DISABLED || efx->reset_pending)
                netif_device_detach(efx->net_dev);
        efx_selftest_async_start(efx);
        return 0;
}
/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);

        return 0;
}
/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static void efx_net_stats(struct net_device *net_dev,
                          struct rtnl_link_stats64 *stats)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        spin_lock_bh(&efx->stats_lock);
        efx->type->update_stats(efx, NULL, stats);
        spin_unlock_bh(&efx->stats_lock);
}

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_err(efx, tx_err, efx->net_dev,
                  "TX stuck with port_enabled=%d: resetting channels\n",
                  efx->port_enabled);

        efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}
/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;

        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

        efx_device_detach_sync(efx);
        efx_stop_all(efx);

        mutex_lock(&efx->mac_lock);
        net_dev->mtu = new_mtu;
        efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        efx_start_all(efx);
        efx_device_attach_if_not_resetting(efx);
        return 0;
}
static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        u8 *new_addr = addr->sa_data;
        u8 old_addr[6];
        int rc;

        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
                          new_addr);
                return -EADDRNOTAVAIL;
        }

        /* save old address */
        ether_addr_copy(old_addr, net_dev->dev_addr);
        ether_addr_copy(net_dev->dev_addr, new_addr);
        if (efx->type->set_mac_address) {
                rc = efx->type->set_mac_address(efx);
                if (rc) {
                        ether_addr_copy(net_dev->dev_addr, old_addr);
                        return rc;
                }
        }

        /* Reconfigure the MAC */
        mutex_lock(&efx->mac_lock);
        efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        return 0;
}
/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_rx_mode(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->port_enabled)
                queue_work(efx->workqueue, &efx->mac_work);
        /* Otherwise efx_start_port() will do this */
}
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        /* If disabling RX n-tuple filtering, clear existing filters */
        if (net_dev->features & ~data & NETIF_F_NTUPLE) {
                rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
                if (rc)
                        return rc;
        }

        /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
         * If rx-fcs is changed, mac_reconfigure updates that too.
         */
        if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
                                          NETIF_F_RXFCS)) {
                /* efx_set_rx_mode() will schedule MAC work to update filters
                 * when the new features are finally set in net_dev.
                 */
                efx_set_rx_mode(net_dev);
        }

        return 0;
}
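/* Worked example (ours) of the two feature-mask tests above: with
 * old = NETIF_F_NTUPLE | NETIF_F_RXCSUM and requested = NETIF_F_RXCSUM,
 * "old & ~requested" keeps only the bits being turned off, while
 * "old ^ requested" keeps every bit that changed in either direction.
 * Kept under #if 0; example_feature_masks() is a hypothetical name.
 */
#if 0
static void example_feature_masks(void)
{
        netdev_features_t old = NETIF_F_NTUPLE | NETIF_F_RXCSUM;
        netdev_features_t requested = NETIF_F_RXCSUM;

        WARN_ON((old & ~requested) != NETIF_F_NTUPLE);  /* bit being cleared */
        WARN_ON((old ^ requested) != NETIF_F_NTUPLE);   /* bit that changed */
}
#endif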
static int efx_get_phys_port_id(struct net_device *net_dev,
                                struct netdev_phys_item_id *ppid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->get_phys_port_id)
                return efx->type->get_phys_port_id(efx, ppid);
        else
                return -EOPNOTSUPP;
}

static int efx_get_phys_port_name(struct net_device *net_dev,
                                  char *name, size_t len)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (snprintf(name, len, "p%u", efx->port_num) >= len)
                return -EINVAL;
        return 0;
}

static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->vlan_rx_add_vid)
                return efx->type->vlan_rx_add_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->vlan_rx_kill_vid)
                return efx->type->vlan_rx_kill_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
{
        switch (in) {
        case UDP_TUNNEL_TYPE_VXLAN:
                return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
        case UDP_TUNNEL_TYPE_GENEVE:
                return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
        default:
                return -1;
        }
}

static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct efx_nic *efx = netdev_priv(dev);
        struct efx_udp_tunnel tnl;
        int efx_tunnel_type;

        efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
        if (efx_tunnel_type < 0)
                return;

        tnl.type = (u16)efx_tunnel_type;
        tnl.port = ti->port;

        if (efx->type->udp_tnl_add_port)
                (void)efx->type->udp_tnl_add_port(efx, tnl);
}

static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct efx_nic *efx = netdev_priv(dev);
        struct efx_udp_tunnel tnl;
        int efx_tunnel_type;

        efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
        if (efx_tunnel_type < 0)
                return;

        tnl.type = (u16)efx_tunnel_type;
        tnl.port = ti->port;

        if (efx->type->udp_tnl_del_port)
                (void)efx->type->udp_tnl_del_port(efx, tnl);
}
static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_get_stats64        = efx_net_stats,
        .ndo_tx_timeout         = efx_watchdog,
        .ndo_start_xmit         = efx_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = efx_ioctl,
        .ndo_change_mtu         = efx_change_mtu,
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
        .ndo_vlan_rx_add_vid    = efx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
        .ndo_set_vf_mac         = efx_sriov_set_vf_mac,
        .ndo_set_vf_vlan        = efx_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config      = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
#endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = efx_netpoll,
#endif
        .ndo_setup_tc           = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
#endif
        .ndo_udp_tunnel_add     = efx_udp_tunnel_add,
        .ndo_udp_tunnel_del     = efx_udp_tunnel_del,
};
static void efx_update_name(struct efx_nic *efx)
{
        strcpy(efx->name, efx->net_dev->name);
        efx_mtd_rename(efx);
        efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

        if ((net_dev->netdev_ops == &efx_netdev_ops) &&
            event == NETDEV_CHANGENAME)
                efx_update_name(netdev_priv(net_dev));

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
        return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
#ifdef CONFIG_SFC_MCDI_LOGGING
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

        return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
}
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        bool enable = count > 0 && *buf != '0';

        mcdi->logging_enabled = enable;
        return count;
}
static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
#endif
static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        struct efx_channel *channel;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                net_dev->priv_flags |= IFF_UNICAST_FLT;
        net_dev->ethtool_ops = &efx_ethtool_ops;
        net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
        net_dev->min_mtu = EFX_MIN_MTU;
        net_dev->max_mtu = EFX_MAX_MTU;

        rtnl_lock();

        /* Enable resets to be scheduled and check whether any were
         * already requested.  If so, the NIC is probably hosed so we
         * abort.
         */
        efx->state = STATE_READY;
        smp_mb(); /* ensure we change state before checking reset_pending */
        if (efx->reset_pending) {
                netif_err(efx, probe, efx->net_dev,
                          "aborting probe due to scheduled reset\n");
                rc = -EIO;
                goto fail_locked;
        }

        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(net_dev);

        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;

        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue_core_txq(tx_queue);
        }

        efx_associate(efx);

        rtnl_unlock();

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }
#ifdef CONFIG_SFC_MCDI_LOGGING
        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_attr_mcdi_logging;
        }
#endif

        return 0;

#ifdef CONFIG_SFC_MCDI_LOGGING
fail_attr_mcdi_logging:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
#endif
fail_registered:
        rtnl_lock();
        efx_dissociate(efx);
        unregister_netdevice(net_dev);
fail_locked:
        efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
}
static void efx_unregister_netdev(struct efx_nic *efx)
{
        if (!efx->net_dev)
                return;

        BUG_ON(netdev_priv(efx->net_dev) != efx);

        if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
#ifdef CONFIG_SFC_MCDI_LOGGING
                device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
#endif
                device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
                unregister_netdev(efx->net_dev);
        }
}
/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset.
 */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        if (method == RESET_TYPE_MCDI_TIMEOUT)
                efx->type->prepare_flr(efx);

        efx_stop_all(efx);
        efx_disable_interrupts(efx);

        mutex_lock(&efx->mac_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
            method != RESET_TYPE_DATAPATH)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
}
/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (method == RESET_TYPE_MCDI_TIMEOUT)
                efx->type->finish_flr(efx);

        /* Ensure that SRAM is initialised even if we're disabling the device */
        rc = efx->type->init(efx);
        if (rc) {
                netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
                goto fail;
        }

        if (!ok)
                goto fail;

        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
            method != RESET_TYPE_DATAPATH) {
                rc = efx->phy_op->init(efx);
                if (rc)
                        goto fail;
                rc = efx->phy_op->reconfigure(efx);
                if (rc && rc != -EPERM)
                        netif_err(efx, drv, efx->net_dev,
                                  "could not restore PHY settings\n");
        }

        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail;

#ifdef CONFIG_SFC_SRIOV
        rc = efx->type->vswitching_restore(efx);
        if (rc) /* not fatal; the PF will still work fine */
                netif_warn(efx, probe, efx->net_dev,
                           "failed to restore vswitching rc=%d;"
                           " VFs may not function\n", rc);
#endif

        down_read(&efx->filter_sem);
        efx_restore_filters(efx);
        up_read(&efx->filter_sem);
        if (efx->type->sriov_reset)
                efx->type->sriov_reset(efx);

        mutex_unlock(&efx->mac_lock);

        efx_start_all(efx);

        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);

        return 0;

fail:
        efx->port_initialized = false;

        mutex_unlock(&efx->mac_lock);

        return rc;
}
/* Reset the NIC using the specified method.  Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
        int rc, rc2;
        bool disabled;

        netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
                   RESET_TYPE(method));

        efx_device_detach_sync(efx);
        efx_reset_down(efx, method);

        rc = efx->type->reset(efx, method);
        if (rc) {
                netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
                goto out;
        }

        /* Clear flags for the scopes we covered.  We assume the NIC and
         * driver are now quiescent so that there is no race here.
         */
        if (method < RESET_TYPE_MAX_METHOD)
                efx->reset_pending &= -(1 << (method + 1));
        else /* it doesn't fit into the well-ordered scope hierarchy */
                __clear_bit(method, &efx->reset_pending);

        /* Reinitialise bus-mastering, which may have been turned off before
         * the reset was scheduled. This is still appropriate, even in the
         * RESET_TYPE_DISABLE since this driver generally assumes the hardware
         * can respond to requests. */
        pci_set_master(efx->pci_dev);

out:
        /* Leave device stopped if necessary */
        disabled = rc ||
                method == RESET_TYPE_DISABLE ||
                method == RESET_TYPE_RECOVER_OR_DISABLE;
        rc2 = efx_reset_up(efx, method, !disabled);
        if (rc2) {
                disabled = true;
                if (!rc)
                        rc = rc2;
        }

        if (disabled) {
                dev_close(efx->net_dev);
                netif_err(efx, drv, efx->net_dev, "has been disabled\n");
                efx->state = STATE_DISABLED;
        } else {
                netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
                efx_device_attach_if_not_resetting(efx);
        }
        return rc;
}
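/* Worked example (ours): in two's complement, -(1 << (method + 1)) equals
 * ~((1 << (method + 1)) - 1), i.e. a mask with bits 0..method clear and all
 * higher bits set.  For method == 2 the mask is ...11111000, so the AND
 * above discards every pending reset of scope <= method while keeping the
 * wider-scoped ones.  Kept under #if 0; example_reset_mask() is ours.
 */
#if 0
static void example_reset_mask(void)
{
        unsigned long pending = BIT(1) | BIT(2) | BIT(4);

        pending &= -(1UL << (2 + 1));   /* method == 2: clears bits 0..2 */
        WARN_ON(pending != BIT(4));     /* only the wider scope survives */
}
#endif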
/* Try recovery mechanisms.
 * For now only EEH is supported.
 * Returns 0 if the recovery mechanisms are unsuccessful.
 * Returns a non-zero value otherwise.
 */
int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
        /* A PCI error can occur and not be seen by EEH because nothing
         * happens on the PCI bus. In this case the driver may fail and
         * schedule a 'recover or reset', leading to this recovery handler.
         * Manually call the eeh failure check function.
         */
        struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
        if (eeh_dev_check_failure(eehdev)) {
                /* The EEH mechanisms will handle the error and reset the
                 * device if necessary.
                 */
                return 1;
        }
#endif
        return 0;
}
static void efx_wait_for_bist_end(struct efx_nic *efx)
{
        int i;

        for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
                if (efx_mcdi_poll_reboot(efx))
                        goto out;
                msleep(BIST_WAIT_DELAY_MS);
        }

        netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
out:
        /* Either way unset the BIST flag. If we found no reboot we probably
         * won't recover, but we should try.
         */
        efx->mc_bist_for_other_fn = false;
}
/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
        unsigned long pending;
        enum reset_type method;

        pending = READ_ONCE(efx->reset_pending);
        method = fls(pending) - 1;

        if (method == RESET_TYPE_MC_BIST)
                efx_wait_for_bist_end(efx);

        if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
             method == RESET_TYPE_RECOVER_OR_ALL) &&
            efx_try_recovery(efx))
                return;

        if (!pending)
                return;

        rtnl_lock();

        /* We checked the state in efx_schedule_reset() but it may
         * have changed by now.  Now that we have the RTNL lock,
         * it cannot change again.
         */
        if (efx->state == STATE_READY)
                (void)efx_reset(efx, method);

        rtnl_unlock();
}
void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
        enum reset_type method;

        if (efx->state == STATE_RECOVERY) {
                netif_dbg(efx, drv, efx->net_dev,
                          "recovering: skip scheduling %s reset\n",
                          RESET_TYPE(type));
                return;
        }

        switch (type) {
        case RESET_TYPE_INVISIBLE:
        case RESET_TYPE_ALL:
        case RESET_TYPE_RECOVER_OR_ALL:
        case RESET_TYPE_WORLD:
        case RESET_TYPE_DISABLE:
        case RESET_TYPE_RECOVER_OR_DISABLE:
        case RESET_TYPE_DATAPATH:
        case RESET_TYPE_MC_BIST:
        case RESET_TYPE_MCDI_TIMEOUT:
                method = type;
                netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
                          RESET_TYPE(method));
                break;
        default:
                method = efx->type->map_reset_reason(type);
                netif_dbg(efx, drv, efx->net_dev,
                          "scheduling %s reset for %s\n",
                          RESET_TYPE(method), RESET_TYPE(type));
                break;
        }

        set_bit(method, &efx->reset_pending);
        smp_mb(); /* ensure we change reset_pending before checking state */

        /* If we're not READY then just leave the flags set as the cue
         * to abort probing or reschedule the reset later.
         */
        if (READ_ONCE(efx->state) != STATE_READY)
                return;

        /* efx_process_channel() will no longer read events once a
         * reset is scheduled. So switch back to poll'd MCDI completions. */
        efx_mcdi_mode_poll(efx);

        queue_work(reset_workqueue, &efx->reset_work);
}
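/* Note (ours): the smp_mb() above pairs with the one in
 * efx_register_netdev().  This function stores into reset_pending and then
 * reads efx->state; probe stores STATE_READY and then reads reset_pending.
 * With both barriers, at least one side must observe the other's store, so
 * a reset requested around probe time is either queued here or noticed and
 * acted on in efx_register_netdev().
 */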
/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),  /* SFC9020 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),  /* SFL9021 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {0}                     /* end of list */
};
/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/

int efx_port_dummy_op_int(struct efx_nic *efx)
{
        return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}

static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
        return false;
}

static const struct efx_phy_operations efx_dummy_phy_operations = {
        .init            = efx_port_dummy_op_int,
        .reconfigure     = efx_port_dummy_op_int,
        .poll            = efx_port_dummy_op_poll,
        .fini            = efx_port_dummy_op_void,
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/
/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
{
        int rc = -ENOMEM, i;

        /* Initialise common structures */
        INIT_LIST_HEAD(&efx->node);
        INIT_LIST_HEAD(&efx->secondary_list);
        spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
#endif
        INIT_WORK(&efx->reset_work, efx_reset_work);
        INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
        INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
        efx->state = STATE_UNINIT;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

        efx->net_dev = net_dev;
        efx->rx_prefix_size = efx->type->rx_prefix_size;
        efx->rx_ip_align =
                NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
        efx->rx_packet_hash_offset =
                efx->type->rx_hash_offset - efx->type->rx_prefix_size;
        efx->rx_packet_ts_offset =
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
        INIT_WORK(&efx->mac_work, efx_mac_work);
        init_waitqueue_head(&efx->flush_wq);

        for (i = 0; i < EFX_MAX_CHANNELS; i++) {
                efx->channel[i] = efx_alloc_channel(efx, i, NULL);
                if (!efx->channel[i])
                        goto fail;
                efx->msi_context[i].efx = efx;
                efx->msi_context[i].index = i;
        }

        /* Higher numbered interrupt modes are less capable! */
        if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
                         efx->type->min_interrupt_mode)) {
                rc = -EIO;
                goto fail;
        }
        efx->interrupt_mode = max(efx->type->max_interrupt_mode,
                                  interrupt_mode);
        efx->interrupt_mode = min(efx->type->min_interrupt_mode,
                                  efx->interrupt_mode);

        /* Would be good to use the net_dev name, but we're too early */
        snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
                 pci_name(pci_dev));
        efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
        if (!efx->workqueue)
                goto fail;

        return 0;

fail:
        efx_fini_struct(efx);
        return rc;
}
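/* Note (ours): interrupt modes are numbered 0=>MSI-X, 1=>MSI, 2=>legacy
 * (see the interrupt_mode module parameter below), so "higher numbered"
 * really does mean "less capable".  The max()/min() pair above clamps the
 * user-requested mode into [max_interrupt_mode, min_interrupt_mode]; e.g.
 * a request for legacy (2) on a NIC type whose least capable supported
 * mode is MSI (1) is clamped to MSI.
 */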
static void efx_fini_struct(struct efx_nic *efx)
{
        int i;

        for (i = 0; i < EFX_MAX_CHANNELS; i++)
                kfree(efx->channel[i]);

        kfree(efx->vpd_sn);

        if (efx->workqueue) {
                destroy_workqueue(efx->workqueue);
                efx->workqueue = NULL;
        }
}
void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
        u64 n_rx_nodesc_trunc = 0;
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        /* Flush reset_work. It can no longer be scheduled since we
         * are not READY.
         */
        BUG_ON(efx->state == STATE_READY);
        cancel_work_sync(&efx->reset_work);

        efx_disable_interrupts(efx);
        efx_nic_fini_interrupt(efx);
        efx_fini_port(efx);
        efx->type->fini(efx);
        efx_fini_napi(efx);
        efx_remove_all(efx);
}
/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).  A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx_dissociate(efx);
        dev_close(efx->net_dev);
        efx_disable_interrupts(efx);
        efx->state = STATE_UNINIT;
        rtnl_unlock();

        if (efx->type->sriov_fini)
                efx->type->sriov_fini(efx);

        efx_unregister_netdev(efx);

        efx_mtd_remove(efx);

        efx_pci_remove_main(efx);

        efx_fini_io(efx);
        netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

        efx_fini_struct(efx);
        free_netdev(efx->net_dev);

        pci_disable_pcie_error_reporting(pci_dev);
}
/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
        struct pci_dev *dev = efx->pci_dev;
        char vpd_data[SFC_VPD_LEN];
        ssize_t vpd_size;
        int ro_start, ro_size, i, j;

        /* Get the vpd data from the device */
        vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
        if (vpd_size <= 0) {
                netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
                return;
        }

        /* Get the Read only section */
        ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
        if (ro_start < 0) {
                netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
                return;
        }

        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
        j = ro_size;
        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        if (i + j > vpd_size)
                j = vpd_size - i;

        /* Get the Part number */
        i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
        if (i < 0) {
                netif_err(efx, drv, efx->net_dev, "Part number not found\n");
                return;
        }

        j = pci_vpd_info_field_size(&vpd_data[i]);
        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (i + j > vpd_size) {
                netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
                return;
        }

        netif_info(efx, drv, efx->net_dev,
                   "Part Number : %.*s\n", j, &vpd_data[i]);

        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        j = ro_size;
        i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
        if (i < 0) {
                netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
                return;
        }

        j = pci_vpd_info_field_size(&vpd_data[i]);
        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (i + j > vpd_size) {
                netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
                return;
        }

        efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
        if (!efx->vpd_sn)
                return;

        snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}
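/* Layout sketch (ours) of the VPD bytes parsed above, following the PCI
 * large-resource encoding:
 *
 *   [tag 0x90 = RO data][len lo][len hi]    <- pci_vpd_find_tag()
 *   ['P']['N'][len][part number bytes...]   <- "PN" info keyword
 *   ['S']['N'][len][serial number bytes...] <- "SN" info keyword
 *
 * Each keyword header is PCI_VPD_INFO_FLD_HDR_SIZE (3) bytes - two keyword
 * characters plus a one-byte field length - which is why i is advanced
 * past the header before the j data bytes are used.
 */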
/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        efx_init_napi(efx);

        rc = efx->type->init(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
                goto fail3;
        }

        rc = efx_init_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise port\n");
                goto fail4;
        }

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail5;
        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail6;

        return 0;

 fail6:
        efx_nic_fini_interrupt(efx);
 fail5:
        efx_fini_port(efx);
 fail4:
        efx->type->fini(efx);
 fail3:
        efx_fini_napi(efx);
        efx_remove_all(efx);
 fail1:
        return rc;
}
static int efx_pci_probe_post_io(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc = efx_pci_probe_main(efx);

        if (rc)
                return rc;

        if (efx->type->sriov_init) {
                rc = efx->type->sriov_init(efx);
                if (rc)
                        netif_err(efx, probe, efx->net_dev,
                                  "SR-IOV can't be enabled rc %d\n", rc);
        }

        /* Determine netdevice features */
        net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
                              NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
        if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                net_dev->features |= NETIF_F_TSO6;
        /* Check whether device supports TSO */
        if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
                net_dev->features &= ~NETIF_F_ALL_TSO;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
                                   NETIF_F_RXCSUM);

        net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

        /* Disable receiving frames with bad FCS, by default. */
        net_dev->features &= ~NETIF_F_RXALL;

        /* Disable VLAN filtering by default.  It may be enforced if
         * the feature is fixed (i.e. VLAN filters are required to
         * receive VLAN tagged packets due to vPort restrictions).
         */
        net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        net_dev->features |= efx->fixed_features;

        rc = efx_register_netdev(efx);
        if (rc)
                efx_pci_remove_main(efx);

        return rc;
}
/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
                         const struct pci_device_id *entry)
{
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
                                     EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        efx = netdev_priv(net_dev);
        efx->type = (const struct efx_nic_type *) entry->driver_data;
        efx->fixed_features |= NETIF_F_HIGHDMA;

        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
        rc = efx_init_struct(efx, pci_dev, net_dev);
        if (rc)
                goto fail1;

        netif_info(efx, probe, efx->net_dev,
                   "Solarflare NIC detected\n");

        if (!efx->type->is_vf)
                efx_probe_vpd_strings(efx);

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
        if (rc)
                goto fail2;

        rc = efx_pci_probe_post_io(efx);
        if (rc) {
                /* On failure, retry once immediately.
                 * If we aborted probe due to a scheduled reset, dismiss it.
                 */
                efx->reset_pending = 0;
                rc = efx_pci_probe_post_io(efx);
                if (rc) {
                        /* On another failure, retry once more
                         * after a 50-305ms delay.
                         */
                        unsigned char r;

                        get_random_bytes(&r, 1);
                        msleep((unsigned int)r + 50);
                        efx->reset_pending = 0;
                        rc = efx_pci_probe_post_io(efx);
                }
        }
        if (rc)
                goto fail3;

        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

        /* Try to create MTDs, but allow this to fail */
        rtnl_lock();
        rc = efx_mtd_probe(efx);
        rtnl_unlock();
        if (rc && rc != -EPERM)
                netif_warn(efx, probe, efx->net_dev,
                           "failed to create MTDs (%d)\n", rc);

        rc = pci_enable_pcie_error_reporting(pci_dev);
        if (rc && rc != -EINVAL)
                netif_notice(efx, probe, efx->net_dev,
                             "PCIE error reporting unavailable (%d).\n",
                             rc);

        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);

        return 0;

 fail3:
        efx_fini_io(efx);
 fail2:
        efx_fini_struct(efx);
 fail1:
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}
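/* Note (ours): in the retry path above, r is a single random byte, so
 * msleep((unsigned int)r + 50) sleeps between 50 and 305 ms, matching the
 * "50-305ms" comment; the jitter presumably keeps multiple functions of
 * the same NIC from retrying in lockstep.
 */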
/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        int rc;
        struct efx_nic *efx = pci_get_drvdata(dev);

        if (efx->type->sriov_configure) {
                rc = efx->type->sriov_configure(efx, num_vfs);
                if (rc)
                        return rc;
                else
                        return num_vfs;
        } else
                return -EOPNOTSUPP;
}
#endif
static int efx_pm_freeze(struct device *dev)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

        rtnl_lock();

        if (efx->state != STATE_DISABLED) {
                efx->state = STATE_UNINIT;

                efx_device_detach_sync(efx);

                efx_stop_all(efx);
                efx_disable_interrupts(efx);
        }

        rtnl_unlock();

        return 0;
}
static int efx_pm_thaw(struct device *dev)
{
        int rc;
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

        rtnl_lock();

        if (efx->state != STATE_DISABLED) {
                rc = efx_enable_interrupts(efx);
                if (rc)
                        goto fail;

                mutex_lock(&efx->mac_lock);
                efx->phy_op->reconfigure(efx);
                mutex_unlock(&efx->mac_lock);

                efx_start_all(efx);

                efx_device_attach_if_not_resetting(efx);

                efx->state = STATE_READY;

                efx->type->resume_wol(efx);
        }

        rtnl_unlock();

        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        queue_work(reset_workqueue, &efx->reset_work);

        return 0;

fail:
        rtnl_unlock();

        return rc;
}
static int efx_pm_poweroff(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        efx->type->fini(efx);

        efx->reset_pending = 0;

        pci_save_state(pci_dev);
        return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);
        int rc;

        rc = pci_set_power_state(pci_dev, PCI_D0);
        if (rc)
                return rc;
        pci_restore_state(pci_dev);
        rc = pci_enable_device(pci_dev);
        if (rc)
                return rc;
        pci_set_master(efx->pci_dev);
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
        rc = efx->type->init(efx);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
        return rc;
}

static int efx_pm_suspend(struct device *dev)
{
        int rc;

        efx_pm_freeze(dev);
        rc = efx_pm_poweroff(dev);
        if (rc)
                efx_pm_resume(dev);

        return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
        .suspend        = efx_pm_suspend,
        .resume         = efx_pm_resume,
        .freeze         = efx_pm_freeze,
        .thaw           = efx_pm_thaw,
        .poweroff       = efx_pm_poweroff,
        .restore        = efx_pm_resume,
};
/* A PCI error affecting this device was detected.
 * At this point MMIO and DMA may be disabled.
 * Stop the software path and request a slot reset.
 */
static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
                                              enum pci_channel_state state)
{
        pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
        struct efx_nic *efx = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        rtnl_lock();

        if (efx->state != STATE_DISABLED) {
                efx->state = STATE_RECOVERY;
                efx->reset_pending = 0;

                efx_device_detach_sync(efx);

                efx_stop_all(efx);
                efx_disable_interrupts(efx);

                status = PCI_ERS_RESULT_NEED_RESET;
        } else {
                /* If the interface is disabled we don't want to do anything
                 * with it.
                 */
                status = PCI_ERS_RESULT_RECOVERED;
        }

        rtnl_unlock();

        pci_disable_device(pdev);

        return status;
}
/* Fake a successful reset, which will be performed later in efx_io_resume. */
static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
        struct efx_nic *efx = pci_get_drvdata(pdev);
        pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
        int rc;

        if (pci_enable_device(pdev)) {
                netif_err(efx, hw, efx->net_dev,
                          "Cannot re-enable PCI device after reset.\n");
                status = PCI_ERS_RESULT_DISCONNECT;
        }

        rc = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (rc) {
                netif_err(efx, hw, efx->net_dev,
                          "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
                /* Non-fatal error. Continue. */
        }

        return status;
}
/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
        struct efx_nic *efx = pci_get_drvdata(pdev);
        int rc;

        rtnl_lock();

        if (efx->state == STATE_DISABLED)
                goto out;

        rc = efx_reset(efx, RESET_TYPE_ALL);
        if (rc) {
                netif_err(efx, hw, efx->net_dev,
                          "efx_reset failed after PCI error (%d)\n", rc);
        } else {
                efx->state = STATE_READY;
                netif_dbg(efx, hw, efx->net_dev,
                          "Done resetting and resuming IO after PCI error.\n");
        }

out:
        rtnl_unlock();
}
/* For simplicity and reliability, we always require a slot reset and try to
 * reset the hardware when a pci error affecting the device is detected.
 * We leave both the link_reset and mmio_enabled callback unimplemented:
 * with our request for slot reset the mmio_enabled callback will never be
 * called, and the link_reset callback is not used by AER or EEH mechanisms.
 */
static const struct pci_error_handlers efx_err_handlers = {
        .error_detected = efx_io_error_detected,
        .slot_reset     = efx_io_slot_reset,
        .resume         = efx_io_resume,
};

static struct pci_driver efx_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = efx_pci_table,
        .probe          = efx_pci_probe,
        .remove         = efx_pci_remove,
        .driver.pm      = &efx_pm_ops,
        .err_handler    = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_pci_sriov_configure,
#endif
};
/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

module_param(interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

#ifdef CONFIG_SFC_SRIOV
        rc = efx_init_sriov();
        if (rc)
                goto err_sriov;
#endif

        reset_workqueue = create_singlethread_workqueue("sfc_reset");
        if (!reset_workqueue) {
                rc = -ENOMEM;
                goto err_reset;
        }

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        return 0;

 err_pci:
        destroy_workqueue(reset_workqueue);
 err_reset:
#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
 err_sriov:
#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
        return rc;
}
static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&efx_pci_driver);
        destroy_workqueue(reset_workqueue);
#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);
}
module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
              "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
MODULE_VERSION(EFX_DRIVER_VERSION);