1 // SPDX-License-Identifier: GPL-2.0-only
2 /****************************************************************************
3 * Driver for Solarflare network controllers and boards
4 * Copyright 2018 Solarflare Communications Inc.
5 * Copyright 2019-2020 Xilinx Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation, incorporated herein by reference.
12 #include "ef100_nic.h"
13 #include "efx_common.h"
14 #include "efx_channels.h"
17 #include "ef100_regs.h"
19 #include "mcdi_pcol.h"
20 #include "mcdi_port_common.h"
21 #include "mcdi_functions.h"
22 #include "mcdi_filters.h"
25 #include "ef100_netdev.h"
27 #define EF100_MAX_VIS 4096
28 #define EF100_NUM_MCDI_BUFFERS 1
29 #define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
31 #define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)
35 static u8
*ef100_mcdi_buf(struct efx_nic
*efx
, u8 bufid
, dma_addr_t
*dma_addr
)
37 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
40 *dma_addr
= nic_data
->mcdi_buf
.dma_addr
+
41 bufid
* ALIGN(MCDI_BUF_LEN
, 256);
42 return nic_data
->mcdi_buf
.addr
+ bufid
* ALIGN(MCDI_BUF_LEN
, 256);
45 static int ef100_get_warm_boot_count(struct efx_nic
*efx
)
49 efx_readd(efx
, ®
, efx_reg(efx
, ER_GZ_MC_SFT_STATUS
));
51 if (EFX_DWORD_FIELD(reg
, EFX_DWORD_0
) == 0xffffffff) {
52 netif_err(efx
, hw
, efx
->net_dev
, "Hardware unavailable\n");
53 efx
->state
= STATE_DISABLED
;
56 return EFX_DWORD_FIELD(reg
, EFX_WORD_1
) == 0xb007 ?
57 EFX_DWORD_FIELD(reg
, EFX_WORD_0
) : -EIO
;
61 static void ef100_mcdi_request(struct efx_nic
*efx
,
62 const efx_dword_t
*hdr
, size_t hdr_len
,
63 const efx_dword_t
*sdu
, size_t sdu_len
)
66 u8
*pdu
= ef100_mcdi_buf(efx
, 0, &dma_addr
);
68 memcpy(pdu
, hdr
, hdr_len
);
69 memcpy(pdu
+ hdr_len
, sdu
, sdu_len
);
72 /* The hardware provides 'low' and 'high' (doorbell) registers
73 * for passing the 64-bit address of an MCDI request to
74 * firmware. However the dwords are swapped by firmware. The
75 * least significant bits of the doorbell are then 0 for all
76 * MCDI requests due to alignment.
78 _efx_writed(efx
, cpu_to_le32((u64
)dma_addr
>> 32), efx_reg(efx
, ER_GZ_MC_DB_LWRD
));
79 _efx_writed(efx
, cpu_to_le32((u32
)dma_addr
), efx_reg(efx
, ER_GZ_MC_DB_HWRD
));
82 static bool ef100_mcdi_poll_response(struct efx_nic
*efx
)
84 const efx_dword_t hdr
=
85 *(const efx_dword_t
*)(ef100_mcdi_buf(efx
, 0, NULL
));
88 return EFX_DWORD_FIELD(hdr
, MCDI_HEADER_RESPONSE
);
91 static void ef100_mcdi_read_response(struct efx_nic
*efx
,
92 efx_dword_t
*outbuf
, size_t offset
,
95 const u8
*pdu
= ef100_mcdi_buf(efx
, 0, NULL
);
97 memcpy(outbuf
, pdu
+ offset
, outlen
);
100 static int ef100_mcdi_poll_reboot(struct efx_nic
*efx
)
102 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
105 rc
= ef100_get_warm_boot_count(efx
);
107 /* The firmware is presumably in the process of
108 * rebooting. However, we are supposed to report each
109 * reboot just once, so we must only do that once we
110 * can read and store the updated warm boot count.
115 if (rc
== nic_data
->warm_boot_count
)
118 nic_data
->warm_boot_count
= rc
;
/* Hook called by the common MCDI layer when a reboot is detected.
 * All EF100 reboot bookkeeping happens in ef100_mcdi_poll_reboot(),
 * so nothing to do here.
 */
static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
{
}
129 static int ef100_get_mac_address(struct efx_nic
*efx
, u8
*mac_address
)
131 MCDI_DECLARE_BUF(outbuf
, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN
);
135 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN
!= 0);
137 rc
= efx_mcdi_rpc(efx
, MC_CMD_GET_MAC_ADDRESSES
, NULL
, 0,
138 outbuf
, sizeof(outbuf
), &outlen
);
141 if (outlen
< MC_CMD_GET_MAC_ADDRESSES_OUT_LEN
)
144 ether_addr_copy(mac_address
,
145 MCDI_PTR(outbuf
, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE
));
149 static int efx_ef100_init_datapath_caps(struct efx_nic
*efx
)
151 MCDI_DECLARE_BUF(outbuf
, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN
);
152 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
157 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN
!= 0);
159 rc
= efx_mcdi_rpc(efx
, MC_CMD_GET_CAPABILITIES
, NULL
, 0,
160 outbuf
, sizeof(outbuf
), &outlen
);
163 if (outlen
< MC_CMD_GET_CAPABILITIES_V4_OUT_LEN
) {
164 netif_err(efx
, drv
, efx
->net_dev
,
165 "unable to read datapath firmware capabilities\n");
169 nic_data
->datapath_caps
= MCDI_DWORD(outbuf
,
170 GET_CAPABILITIES_OUT_FLAGS1
);
171 nic_data
->datapath_caps2
= MCDI_DWORD(outbuf
,
172 GET_CAPABILITIES_V2_OUT_FLAGS2
);
173 if (outlen
< MC_CMD_GET_CAPABILITIES_V7_OUT_LEN
)
174 nic_data
->datapath_caps3
= 0;
176 nic_data
->datapath_caps3
= MCDI_DWORD(outbuf
,
177 GET_CAPABILITIES_V7_OUT_FLAGS3
);
179 vi_window_mode
= MCDI_BYTE(outbuf
,
180 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE
);
181 rc
= efx_mcdi_window_mode_to_stride(efx
, vi_window_mode
);
185 if (efx_ef100_has_cap(nic_data
->datapath_caps2
, TX_TSO_V3
)) {
186 struct net_device
*net_dev
= efx
->net_dev
;
187 netdev_features_t tso
= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_PARTIAL
|
188 NETIF_F_GSO_UDP_TUNNEL
| NETIF_F_GSO_UDP_TUNNEL_CSUM
|
189 NETIF_F_GSO_GRE
| NETIF_F_GSO_GRE_CSUM
;
191 net_dev
->features
|= tso
;
192 net_dev
->hw_features
|= tso
;
193 net_dev
->hw_enc_features
|= tso
;
194 /* EF100 HW can only offload outer checksums if they are UDP,
195 * so for GRE_CSUM we have to use GSO_PARTIAL.
197 net_dev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
199 efx
->num_mac_stats
= MCDI_WORD(outbuf
,
200 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS
);
201 netif_dbg(efx
, probe
, efx
->net_dev
,
202 "firmware reports num_mac_stats = %u\n",
209 static int ef100_ev_probe(struct efx_channel
*channel
)
211 /* Allocate an extra descriptor for the QMDA status completion entry */
212 return efx_nic_alloc_buffer(channel
->efx
, &channel
->eventq
.buf
,
213 (channel
->eventq_mask
+ 2) *
218 static int ef100_ev_init(struct efx_channel
*channel
)
220 struct ef100_nic_data
*nic_data
= channel
->efx
->nic_data
;
222 /* initial phase is 0 */
223 clear_bit(channel
->channel
, nic_data
->evq_phases
);
225 return efx_mcdi_ev_init(channel
, false, false);
228 static void ef100_ev_read_ack(struct efx_channel
*channel
)
230 efx_dword_t evq_prime
;
232 EFX_POPULATE_DWORD_2(evq_prime
,
233 ERF_GZ_EVQ_ID
, channel
->channel
,
234 ERF_GZ_IDX
, channel
->eventq_read_ptr
&
235 channel
->eventq_mask
);
237 efx_writed(channel
->efx
, &evq_prime
,
238 efx_reg(channel
->efx
, ER_GZ_EVQ_INT_PRIME
));
241 static int ef100_ev_process(struct efx_channel
*channel
, int quota
)
243 struct efx_nic
*efx
= channel
->efx
;
244 struct ef100_nic_data
*nic_data
;
245 bool evq_phase
, old_evq_phase
;
246 unsigned int read_ptr
;
247 efx_qword_t
*p_event
;
252 if (unlikely(!channel
->enabled
))
255 nic_data
= efx
->nic_data
;
256 evq_phase
= test_bit(channel
->channel
, nic_data
->evq_phases
);
257 old_evq_phase
= evq_phase
;
258 read_ptr
= channel
->eventq_read_ptr
;
259 BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN
!= ESF_GZ_EV_TXCMPL_PHASE_LBN
);
261 while (spent
< quota
) {
262 p_event
= efx_event(channel
, read_ptr
);
264 ev_phase
= !!EFX_QWORD_FIELD(*p_event
, ESF_GZ_EV_RXPKTS_PHASE
);
265 if (ev_phase
!= evq_phase
)
268 netif_vdbg(efx
, drv
, efx
->net_dev
,
269 "processing event on %d " EFX_QWORD_FMT
"\n",
270 channel
->channel
, EFX_QWORD_VAL(*p_event
));
272 ev_type
= EFX_QWORD_FIELD(*p_event
, ESF_GZ_E_TYPE
);
275 case ESE_GZ_EF100_EV_RX_PKTS
:
276 efx_ef100_ev_rx(channel
, p_event
);
279 case ESE_GZ_EF100_EV_MCDI
:
280 efx_mcdi_process_event(channel
, p_event
);
282 case ESE_GZ_EF100_EV_TX_COMPLETION
:
283 ef100_ev_tx(channel
, p_event
);
285 case ESE_GZ_EF100_EV_DRIVER
:
286 netif_info(efx
, drv
, efx
->net_dev
,
287 "Driver initiated event " EFX_QWORD_FMT
"\n",
288 EFX_QWORD_VAL(*p_event
));
291 netif_info(efx
, drv
, efx
->net_dev
,
292 "Unhandled event " EFX_QWORD_FMT
"\n",
293 EFX_QWORD_VAL(*p_event
));
297 if ((read_ptr
& channel
->eventq_mask
) == 0)
298 evq_phase
= !evq_phase
;
301 channel
->eventq_read_ptr
= read_ptr
;
302 if (evq_phase
!= old_evq_phase
)
303 change_bit(channel
->channel
, nic_data
->evq_phases
);
308 static irqreturn_t
ef100_msi_interrupt(int irq
, void *dev_id
)
310 struct efx_msi_context
*context
= dev_id
;
311 struct efx_nic
*efx
= context
->efx
;
313 netif_vdbg(efx
, intr
, efx
->net_dev
,
314 "IRQ %d on CPU %d\n", irq
, raw_smp_processor_id());
316 if (likely(READ_ONCE(efx
->irq_soft_enabled
))) {
317 /* Note test interrupts */
318 if (context
->index
== efx
->irq_level
)
319 efx
->last_irq_cpu
= raw_smp_processor_id();
321 /* Schedule processing of the channel */
322 efx_schedule_channel_irq(efx
->channel
[context
->index
]);
328 static int ef100_phy_probe(struct efx_nic
*efx
)
330 struct efx_mcdi_phy_data
*phy_data
;
333 /* Probe for the PHY */
334 efx
->phy_data
= kzalloc(sizeof(struct efx_mcdi_phy_data
), GFP_KERNEL
);
338 rc
= efx_mcdi_get_phy_cfg(efx
, efx
->phy_data
);
342 /* Populate driver and ethtool settings */
343 phy_data
= efx
->phy_data
;
344 mcdi_to_ethtool_linkset(phy_data
->media
, phy_data
->supported_cap
,
345 efx
->link_advertising
);
346 efx
->fec_config
= mcdi_fec_caps_to_ethtool(phy_data
->supported_cap
,
349 /* Default to Autonegotiated flow control if the PHY supports it */
350 efx
->wanted_fc
= EFX_FC_RX
| EFX_FC_TX
;
351 if (phy_data
->supported_cap
& (1 << MC_CMD_PHY_CAP_AN_LBN
))
352 efx
->wanted_fc
|= EFX_FC_AUTO
;
353 efx_link_set_wanted_fc(efx
, efx
->wanted_fc
);
355 /* Push settings to the PHY. Failure is not fatal, the user can try to
356 * fix it using ethtool.
358 rc
= efx_mcdi_port_reconfigure(efx
);
359 if (rc
&& rc
!= -EPERM
)
360 netif_warn(efx
, drv
, efx
->net_dev
,
361 "could not initialise PHY settings\n");
366 static int ef100_filter_table_probe(struct efx_nic
*efx
)
368 return efx_mcdi_filter_table_probe(efx
, true);
371 static int ef100_filter_table_up(struct efx_nic
*efx
)
375 rc
= efx_mcdi_filter_add_vlan(efx
, EFX_FILTER_VID_UNSPEC
);
377 efx_mcdi_filter_table_down(efx
);
381 rc
= efx_mcdi_filter_add_vlan(efx
, 0);
383 efx_mcdi_filter_del_vlan(efx
, EFX_FILTER_VID_UNSPEC
);
384 efx_mcdi_filter_table_down(efx
);
390 static void ef100_filter_table_down(struct efx_nic
*efx
)
392 efx_mcdi_filter_del_vlan(efx
, 0);
393 efx_mcdi_filter_del_vlan(efx
, EFX_FILTER_VID_UNSPEC
);
394 efx_mcdi_filter_table_down(efx
);
399 static int ef100_reconfigure_mac(struct efx_nic
*efx
, bool mtu_only
)
401 WARN_ON(!mutex_is_locked(&efx
->mac_lock
));
403 efx_mcdi_filter_sync_rx_mode(efx
);
405 if (mtu_only
&& efx_has_cap(efx
, SET_MAC_ENHANCED
))
406 return efx_mcdi_set_mtu(efx
);
407 return efx_mcdi_set_mac(efx
);
410 static enum reset_type
ef100_map_reset_reason(enum reset_type reason
)
412 if (reason
== RESET_TYPE_TX_WATCHDOG
)
414 return RESET_TYPE_DISABLE
;
417 static int ef100_map_reset_flags(u32
*flags
)
419 /* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */
420 if ((*flags
& EF100_RESET_PORT
)) {
421 *flags
&= ~EF100_RESET_PORT
;
422 return RESET_TYPE_ALL
;
424 if (*flags
& ETH_RESET_MGMT
) {
425 *flags
&= ~ETH_RESET_MGMT
;
426 return RESET_TYPE_DISABLE
;
432 static int ef100_reset(struct efx_nic
*efx
, enum reset_type reset_type
)
436 dev_close(efx
->net_dev
);
438 if (reset_type
== RESET_TYPE_TX_WATCHDOG
) {
439 netif_device_attach(efx
->net_dev
);
440 __clear_bit(reset_type
, &efx
->reset_pending
);
441 rc
= dev_open(efx
->net_dev
, NULL
);
442 } else if (reset_type
== RESET_TYPE_ALL
) {
443 rc
= efx_mcdi_reset(efx
, reset_type
);
447 netif_device_attach(efx
->net_dev
);
449 rc
= dev_open(efx
->net_dev
, NULL
);
451 rc
= 1; /* Leave the device closed */
456 static void ef100_common_stat_mask(unsigned long *mask
)
458 __set_bit(EF100_STAT_port_rx_packets
, mask
);
459 __set_bit(EF100_STAT_port_tx_packets
, mask
);
460 __set_bit(EF100_STAT_port_rx_bytes
, mask
);
461 __set_bit(EF100_STAT_port_tx_bytes
, mask
);
462 __set_bit(EF100_STAT_port_rx_multicast
, mask
);
463 __set_bit(EF100_STAT_port_rx_bad
, mask
);
464 __set_bit(EF100_STAT_port_rx_align_error
, mask
);
465 __set_bit(EF100_STAT_port_rx_overflow
, mask
);
468 static void ef100_ethtool_stat_mask(unsigned long *mask
)
470 __set_bit(EF100_STAT_port_tx_pause
, mask
);
471 __set_bit(EF100_STAT_port_tx_unicast
, mask
);
472 __set_bit(EF100_STAT_port_tx_multicast
, mask
);
473 __set_bit(EF100_STAT_port_tx_broadcast
, mask
);
474 __set_bit(EF100_STAT_port_tx_lt64
, mask
);
475 __set_bit(EF100_STAT_port_tx_64
, mask
);
476 __set_bit(EF100_STAT_port_tx_65_to_127
, mask
);
477 __set_bit(EF100_STAT_port_tx_128_to_255
, mask
);
478 __set_bit(EF100_STAT_port_tx_256_to_511
, mask
);
479 __set_bit(EF100_STAT_port_tx_512_to_1023
, mask
);
480 __set_bit(EF100_STAT_port_tx_1024_to_15xx
, mask
);
481 __set_bit(EF100_STAT_port_tx_15xx_to_jumbo
, mask
);
482 __set_bit(EF100_STAT_port_rx_good
, mask
);
483 __set_bit(EF100_STAT_port_rx_pause
, mask
);
484 __set_bit(EF100_STAT_port_rx_unicast
, mask
);
485 __set_bit(EF100_STAT_port_rx_broadcast
, mask
);
486 __set_bit(EF100_STAT_port_rx_lt64
, mask
);
487 __set_bit(EF100_STAT_port_rx_64
, mask
);
488 __set_bit(EF100_STAT_port_rx_65_to_127
, mask
);
489 __set_bit(EF100_STAT_port_rx_128_to_255
, mask
);
490 __set_bit(EF100_STAT_port_rx_256_to_511
, mask
);
491 __set_bit(EF100_STAT_port_rx_512_to_1023
, mask
);
492 __set_bit(EF100_STAT_port_rx_1024_to_15xx
, mask
);
493 __set_bit(EF100_STAT_port_rx_15xx_to_jumbo
, mask
);
494 __set_bit(EF100_STAT_port_rx_gtjumbo
, mask
);
495 __set_bit(EF100_STAT_port_rx_bad_gtjumbo
, mask
);
496 __set_bit(EF100_STAT_port_rx_length_error
, mask
);
497 __set_bit(EF100_STAT_port_rx_nodesc_drops
, mask
);
498 __set_bit(GENERIC_STAT_rx_nodesc_trunc
, mask
);
499 __set_bit(GENERIC_STAT_rx_noskb_drops
, mask
);
502 #define EF100_DMA_STAT(ext_name, mcdi_name) \
503 [EF100_STAT_ ## ext_name] = \
504 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
506 static const struct efx_hw_stat_desc ef100_stat_desc
[EF100_STAT_COUNT
] = {
507 EF100_DMA_STAT(port_tx_bytes
, TX_BYTES
),
508 EF100_DMA_STAT(port_tx_packets
, TX_PKTS
),
509 EF100_DMA_STAT(port_tx_pause
, TX_PAUSE_PKTS
),
510 EF100_DMA_STAT(port_tx_unicast
, TX_UNICAST_PKTS
),
511 EF100_DMA_STAT(port_tx_multicast
, TX_MULTICAST_PKTS
),
512 EF100_DMA_STAT(port_tx_broadcast
, TX_BROADCAST_PKTS
),
513 EF100_DMA_STAT(port_tx_lt64
, TX_LT64_PKTS
),
514 EF100_DMA_STAT(port_tx_64
, TX_64_PKTS
),
515 EF100_DMA_STAT(port_tx_65_to_127
, TX_65_TO_127_PKTS
),
516 EF100_DMA_STAT(port_tx_128_to_255
, TX_128_TO_255_PKTS
),
517 EF100_DMA_STAT(port_tx_256_to_511
, TX_256_TO_511_PKTS
),
518 EF100_DMA_STAT(port_tx_512_to_1023
, TX_512_TO_1023_PKTS
),
519 EF100_DMA_STAT(port_tx_1024_to_15xx
, TX_1024_TO_15XX_PKTS
),
520 EF100_DMA_STAT(port_tx_15xx_to_jumbo
, TX_15XX_TO_JUMBO_PKTS
),
521 EF100_DMA_STAT(port_rx_bytes
, RX_BYTES
),
522 EF100_DMA_STAT(port_rx_packets
, RX_PKTS
),
523 EF100_DMA_STAT(port_rx_good
, RX_GOOD_PKTS
),
524 EF100_DMA_STAT(port_rx_bad
, RX_BAD_FCS_PKTS
),
525 EF100_DMA_STAT(port_rx_pause
, RX_PAUSE_PKTS
),
526 EF100_DMA_STAT(port_rx_unicast
, RX_UNICAST_PKTS
),
527 EF100_DMA_STAT(port_rx_multicast
, RX_MULTICAST_PKTS
),
528 EF100_DMA_STAT(port_rx_broadcast
, RX_BROADCAST_PKTS
),
529 EF100_DMA_STAT(port_rx_lt64
, RX_UNDERSIZE_PKTS
),
530 EF100_DMA_STAT(port_rx_64
, RX_64_PKTS
),
531 EF100_DMA_STAT(port_rx_65_to_127
, RX_65_TO_127_PKTS
),
532 EF100_DMA_STAT(port_rx_128_to_255
, RX_128_TO_255_PKTS
),
533 EF100_DMA_STAT(port_rx_256_to_511
, RX_256_TO_511_PKTS
),
534 EF100_DMA_STAT(port_rx_512_to_1023
, RX_512_TO_1023_PKTS
),
535 EF100_DMA_STAT(port_rx_1024_to_15xx
, RX_1024_TO_15XX_PKTS
),
536 EF100_DMA_STAT(port_rx_15xx_to_jumbo
, RX_15XX_TO_JUMBO_PKTS
),
537 EF100_DMA_STAT(port_rx_gtjumbo
, RX_GTJUMBO_PKTS
),
538 EF100_DMA_STAT(port_rx_bad_gtjumbo
, RX_JABBER_PKTS
),
539 EF100_DMA_STAT(port_rx_align_error
, RX_ALIGN_ERROR_PKTS
),
540 EF100_DMA_STAT(port_rx_length_error
, RX_LENGTH_ERROR_PKTS
),
541 EF100_DMA_STAT(port_rx_overflow
, RX_OVERFLOW_PKTS
),
542 EF100_DMA_STAT(port_rx_nodesc_drops
, RX_NODESC_DROPS
),
543 EFX_GENERIC_SW_STAT(rx_nodesc_trunc
),
544 EFX_GENERIC_SW_STAT(rx_noskb_drops
),
547 static size_t ef100_describe_stats(struct efx_nic
*efx
, u8
*names
)
549 DECLARE_BITMAP(mask
, EF100_STAT_COUNT
) = {};
551 ef100_ethtool_stat_mask(mask
);
552 return efx_nic_describe_stats(ef100_stat_desc
, EF100_STAT_COUNT
,
556 static size_t ef100_update_stats_common(struct efx_nic
*efx
, u64
*full_stats
,
557 struct rtnl_link_stats64
*core_stats
)
559 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
560 DECLARE_BITMAP(mask
, EF100_STAT_COUNT
) = {};
561 size_t stats_count
= 0, index
;
562 u64
*stats
= nic_data
->stats
;
564 ef100_ethtool_stat_mask(mask
);
567 for_each_set_bit(index
, mask
, EF100_STAT_COUNT
) {
568 if (ef100_stat_desc
[index
].name
) {
569 *full_stats
++ = stats
[index
];
578 core_stats
->rx_packets
= stats
[EF100_STAT_port_rx_packets
];
579 core_stats
->tx_packets
= stats
[EF100_STAT_port_tx_packets
];
580 core_stats
->rx_bytes
= stats
[EF100_STAT_port_rx_bytes
];
581 core_stats
->tx_bytes
= stats
[EF100_STAT_port_tx_bytes
];
582 core_stats
->rx_dropped
= stats
[EF100_STAT_port_rx_nodesc_drops
] +
583 stats
[GENERIC_STAT_rx_nodesc_trunc
] +
584 stats
[GENERIC_STAT_rx_noskb_drops
];
585 core_stats
->multicast
= stats
[EF100_STAT_port_rx_multicast
];
586 core_stats
->rx_length_errors
=
587 stats
[EF100_STAT_port_rx_gtjumbo
] +
588 stats
[EF100_STAT_port_rx_length_error
];
589 core_stats
->rx_crc_errors
= stats
[EF100_STAT_port_rx_bad
];
590 core_stats
->rx_frame_errors
=
591 stats
[EF100_STAT_port_rx_align_error
];
592 core_stats
->rx_fifo_errors
= stats
[EF100_STAT_port_rx_overflow
];
593 core_stats
->rx_errors
= (core_stats
->rx_length_errors
+
594 core_stats
->rx_crc_errors
+
595 core_stats
->rx_frame_errors
);
600 static size_t ef100_update_stats(struct efx_nic
*efx
,
602 struct rtnl_link_stats64
*core_stats
)
604 __le64
*mc_stats
= kmalloc(array_size(efx
->num_mac_stats
, sizeof(__le64
)), GFP_ATOMIC
);
605 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
606 DECLARE_BITMAP(mask
, EF100_STAT_COUNT
) = {};
607 u64
*stats
= nic_data
->stats
;
609 ef100_common_stat_mask(mask
);
610 ef100_ethtool_stat_mask(mask
);
615 efx_nic_copy_stats(efx
, mc_stats
);
616 efx_nic_update_stats(ef100_stat_desc
, EF100_STAT_COUNT
, mask
,
617 stats
, mc_stats
, false);
621 return ef100_update_stats_common(efx
, full_stats
, core_stats
);
624 static int efx_ef100_get_phys_port_id(struct efx_nic
*efx
,
625 struct netdev_phys_item_id
*ppid
)
627 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
629 if (!is_valid_ether_addr(nic_data
->port_id
))
632 ppid
->id_len
= ETH_ALEN
;
633 memcpy(ppid
->id
, nic_data
->port_id
, ppid
->id_len
);
638 static int efx_ef100_irq_test_generate(struct efx_nic
*efx
)
640 MCDI_DECLARE_BUF(inbuf
, MC_CMD_TRIGGER_INTERRUPT_IN_LEN
);
642 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN
!= 0);
644 MCDI_SET_DWORD(inbuf
, TRIGGER_INTERRUPT_IN_INTR_LEVEL
, efx
->irq_level
);
645 return efx_mcdi_rpc_quiet(efx
, MC_CMD_TRIGGER_INTERRUPT
,
646 inbuf
, sizeof(inbuf
), NULL
, 0, NULL
);
649 #define EFX_EF100_TEST 1
651 static void efx_ef100_ev_test_generate(struct efx_channel
*channel
)
653 MCDI_DECLARE_BUF(inbuf
, MC_CMD_DRIVER_EVENT_IN_LEN
);
654 struct efx_nic
*efx
= channel
->efx
;
658 EFX_POPULATE_QWORD_2(event
,
659 ESF_GZ_E_TYPE
, ESE_GZ_EF100_EV_DRIVER
,
660 ESF_GZ_DRIVER_DATA
, EFX_EF100_TEST
);
662 MCDI_SET_DWORD(inbuf
, DRIVER_EVENT_IN_EVQ
, channel
->channel
);
664 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
665 * already swapped the data to little-endian order.
667 memcpy(MCDI_PTR(inbuf
, DRIVER_EVENT_IN_DATA
), &event
.u64
[0],
668 sizeof(efx_qword_t
));
670 rc
= efx_mcdi_rpc(efx
, MC_CMD_DRIVER_EVENT
, inbuf
, sizeof(inbuf
),
672 if (rc
&& (rc
!= -ENETDOWN
))
679 netif_err(efx
, hw
, efx
->net_dev
, "%s: failed rc=%d\n", __func__
, rc
);
682 static unsigned int ef100_check_caps(const struct efx_nic
*efx
,
685 const struct ef100_nic_data
*nic_data
= efx
->nic_data
;
688 case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST
:
689 return nic_data
->datapath_caps
& BIT_ULL(flag
);
690 case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST
:
691 return nic_data
->datapath_caps2
& BIT_ULL(flag
);
692 case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST
:
693 return nic_data
->datapath_caps3
& BIT_ULL(flag
);
699 /* NIC level access functions
701 #define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
702 NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
703 NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
704 NETIF_F_HW_VLAN_CTAG_TX)
706 const struct efx_nic_type ef100_pf_nic_type
= {
707 .revision
= EFX_REV_EF100
,
709 .probe
= ef100_probe_pf
,
710 .offload_features
= EF100_OFFLOAD_FEATURES
,
712 .mcdi_request
= ef100_mcdi_request
,
713 .mcdi_poll_response
= ef100_mcdi_poll_response
,
714 .mcdi_read_response
= ef100_mcdi_read_response
,
715 .mcdi_poll_reboot
= ef100_mcdi_poll_reboot
,
716 .mcdi_reboot_detected
= ef100_mcdi_reboot_detected
,
717 .irq_enable_master
= efx_port_dummy_op_void
,
718 .irq_test_generate
= efx_ef100_irq_test_generate
,
719 .irq_disable_non_ev
= efx_port_dummy_op_void
,
720 .push_irq_moderation
= efx_channel_dummy_op_void
,
721 .min_interrupt_mode
= EFX_INT_MODE_MSIX
,
722 .map_reset_reason
= ef100_map_reset_reason
,
723 .map_reset_flags
= ef100_map_reset_flags
,
724 .reset
= ef100_reset
,
726 .check_caps
= ef100_check_caps
,
728 .ev_probe
= ef100_ev_probe
,
729 .ev_init
= ef100_ev_init
,
730 .ev_fini
= efx_mcdi_ev_fini
,
731 .ev_remove
= efx_mcdi_ev_remove
,
732 .irq_handle_msi
= ef100_msi_interrupt
,
733 .ev_process
= ef100_ev_process
,
734 .ev_read_ack
= ef100_ev_read_ack
,
735 .ev_test_generate
= efx_ef100_ev_test_generate
,
736 .tx_probe
= ef100_tx_probe
,
737 .tx_init
= ef100_tx_init
,
738 .tx_write
= ef100_tx_write
,
739 .tx_enqueue
= ef100_enqueue_skb
,
740 .rx_probe
= efx_mcdi_rx_probe
,
741 .rx_init
= efx_mcdi_rx_init
,
742 .rx_remove
= efx_mcdi_rx_remove
,
743 .rx_write
= ef100_rx_write
,
744 .rx_packet
= __ef100_rx_packet
,
745 .rx_buf_hash_valid
= ef100_rx_buf_hash_valid
,
746 .fini_dmaq
= efx_fini_dmaq
,
747 .max_rx_ip_filters
= EFX_MCDI_FILTER_TBL_ROWS
,
748 .filter_table_probe
= ef100_filter_table_up
,
749 .filter_table_restore
= efx_mcdi_filter_table_restore
,
750 .filter_table_remove
= ef100_filter_table_down
,
751 .filter_insert
= efx_mcdi_filter_insert
,
752 .filter_remove_safe
= efx_mcdi_filter_remove_safe
,
753 .filter_get_safe
= efx_mcdi_filter_get_safe
,
754 .filter_clear_rx
= efx_mcdi_filter_clear_rx
,
755 .filter_count_rx_used
= efx_mcdi_filter_count_rx_used
,
756 .filter_get_rx_id_limit
= efx_mcdi_filter_get_rx_id_limit
,
757 .filter_get_rx_ids
= efx_mcdi_filter_get_rx_ids
,
758 #ifdef CONFIG_RFS_ACCEL
759 .filter_rfs_expire_one
= efx_mcdi_filter_rfs_expire_one
,
762 .get_phys_port_id
= efx_ef100_get_phys_port_id
,
764 .rx_prefix_size
= ESE_GZ_RX_PKT_PREFIX_LEN
,
765 .rx_hash_offset
= ESF_GZ_RX_PREFIX_RSS_HASH_LBN
/ 8,
766 .rx_ts_offset
= ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN
/ 8,
767 .rx_hash_key_size
= 40,
768 .rx_pull_rss_config
= efx_mcdi_rx_pull_rss_config
,
769 .rx_push_rss_config
= efx_mcdi_pf_rx_push_rss_config
,
770 .rx_push_rss_context_config
= efx_mcdi_rx_push_rss_context_config
,
771 .rx_pull_rss_context_config
= efx_mcdi_rx_pull_rss_context_config
,
772 .rx_restore_rss_contexts
= efx_mcdi_rx_restore_rss_contexts
,
774 .reconfigure_mac
= ef100_reconfigure_mac
,
775 .reconfigure_port
= efx_mcdi_port_reconfigure
,
776 .test_nvram
= efx_new_mcdi_nvram_test_all
,
777 .describe_stats
= ef100_describe_stats
,
778 .start_stats
= efx_mcdi_mac_start_stats
,
779 .update_stats
= ef100_update_stats
,
780 .pull_stats
= efx_mcdi_mac_pull_stats
,
781 .stop_stats
= efx_mcdi_mac_stop_stats
,
783 /* Per-type bar/size configuration not used on ef100. Location of
784 * registers is defined by extended capabilities.
787 .mem_map_size
= NULL
,
791 const struct efx_nic_type ef100_vf_nic_type
= {
792 .revision
= EFX_REV_EF100
,
794 .probe
= ef100_probe_vf
,
795 .offload_features
= EF100_OFFLOAD_FEATURES
,
797 .mcdi_request
= ef100_mcdi_request
,
798 .mcdi_poll_response
= ef100_mcdi_poll_response
,
799 .mcdi_read_response
= ef100_mcdi_read_response
,
800 .mcdi_poll_reboot
= ef100_mcdi_poll_reboot
,
801 .mcdi_reboot_detected
= ef100_mcdi_reboot_detected
,
802 .irq_enable_master
= efx_port_dummy_op_void
,
803 .irq_test_generate
= efx_ef100_irq_test_generate
,
804 .irq_disable_non_ev
= efx_port_dummy_op_void
,
805 .push_irq_moderation
= efx_channel_dummy_op_void
,
806 .min_interrupt_mode
= EFX_INT_MODE_MSIX
,
807 .map_reset_reason
= ef100_map_reset_reason
,
808 .map_reset_flags
= ef100_map_reset_flags
,
809 .reset
= ef100_reset
,
810 .check_caps
= ef100_check_caps
,
811 .ev_probe
= ef100_ev_probe
,
812 .ev_init
= ef100_ev_init
,
813 .ev_fini
= efx_mcdi_ev_fini
,
814 .ev_remove
= efx_mcdi_ev_remove
,
815 .irq_handle_msi
= ef100_msi_interrupt
,
816 .ev_process
= ef100_ev_process
,
817 .ev_read_ack
= ef100_ev_read_ack
,
818 .ev_test_generate
= efx_ef100_ev_test_generate
,
819 .tx_probe
= ef100_tx_probe
,
820 .tx_init
= ef100_tx_init
,
821 .tx_write
= ef100_tx_write
,
822 .tx_enqueue
= ef100_enqueue_skb
,
823 .rx_probe
= efx_mcdi_rx_probe
,
824 .rx_init
= efx_mcdi_rx_init
,
825 .rx_remove
= efx_mcdi_rx_remove
,
826 .rx_write
= ef100_rx_write
,
827 .rx_packet
= __ef100_rx_packet
,
828 .rx_buf_hash_valid
= ef100_rx_buf_hash_valid
,
829 .fini_dmaq
= efx_fini_dmaq
,
830 .max_rx_ip_filters
= EFX_MCDI_FILTER_TBL_ROWS
,
831 .filter_table_probe
= ef100_filter_table_up
,
832 .filter_table_restore
= efx_mcdi_filter_table_restore
,
833 .filter_table_remove
= ef100_filter_table_down
,
834 .filter_insert
= efx_mcdi_filter_insert
,
835 .filter_remove_safe
= efx_mcdi_filter_remove_safe
,
836 .filter_get_safe
= efx_mcdi_filter_get_safe
,
837 .filter_clear_rx
= efx_mcdi_filter_clear_rx
,
838 .filter_count_rx_used
= efx_mcdi_filter_count_rx_used
,
839 .filter_get_rx_id_limit
= efx_mcdi_filter_get_rx_id_limit
,
840 .filter_get_rx_ids
= efx_mcdi_filter_get_rx_ids
,
841 #ifdef CONFIG_RFS_ACCEL
842 .filter_rfs_expire_one
= efx_mcdi_filter_rfs_expire_one
,
845 .rx_prefix_size
= ESE_GZ_RX_PKT_PREFIX_LEN
,
846 .rx_hash_offset
= ESF_GZ_RX_PREFIX_RSS_HASH_LBN
/ 8,
847 .rx_ts_offset
= ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN
/ 8,
848 .rx_hash_key_size
= 40,
849 .rx_pull_rss_config
= efx_mcdi_rx_pull_rss_config
,
850 .rx_push_rss_config
= efx_mcdi_pf_rx_push_rss_config
,
851 .rx_restore_rss_contexts
= efx_mcdi_rx_restore_rss_contexts
,
853 .reconfigure_mac
= ef100_reconfigure_mac
,
854 .test_nvram
= efx_new_mcdi_nvram_test_all
,
855 .describe_stats
= ef100_describe_stats
,
856 .start_stats
= efx_mcdi_mac_start_stats
,
857 .update_stats
= ef100_update_stats
,
858 .pull_stats
= efx_mcdi_mac_pull_stats
,
859 .stop_stats
= efx_mcdi_mac_stop_stats
,
862 .mem_map_size
= NULL
,
/* Compare two "major.minor.point.patch" version strings.
 *
 * Returns > 0 if @a is newer, < 0 if @b is newer, 0 if equal.  A string
 * that parses as a full 4-part version always ranks above one that does
 * not; two unparseable strings compare equal.
 */
static int compare_versions(const char *a, const char *b)
{
	int a_major, a_minor, a_point, a_patch;
	int b_major, b_minor, b_point, b_patch;
	int a_matched, b_matched;

	a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch);
	b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch);

	if (a_matched == 4 && b_matched != 4)
		return +1;

	if (a_matched != 4 && b_matched == 4)
		return -1;

	if (a_matched != 4 && b_matched != 4)
		return 0;

	if (a_major != b_major)
		return a_major - b_major;

	if (a_minor != b_minor)
		return a_minor - b_minor;

	if (a_point != b_point)
		return a_point - b_point;

	return a_patch - b_patch;
}
/* Byte-at-a-time TLV parser states for the design-parameters stream.
 * EF100_TLV_TYPE must be first (value 0) so a zero-initialised
 * ef100_tlv_state starts expecting a type byte.
 */
enum ef100_tlv_state_machine {
	EF100_TLV_TYPE,
	EF100_TLV_TYPE_CONT,
	EF100_TLV_LENGTH,
	EF100_TLV_VALUE,
};
903 struct ef100_tlv_state
{
904 enum ef100_tlv_state_machine state
;
911 static int ef100_tlv_feed(struct ef100_tlv_state
*state
, u8 byte
)
913 switch (state
->state
) {
915 state
->type
= byte
& 0x7f;
916 state
->state
= (byte
& 0x80) ? EF100_TLV_TYPE_CONT
918 /* Clear ready to read in a new entry */
920 state
->value_offset
= 0;
922 case EF100_TLV_TYPE_CONT
:
923 state
->type
|= byte
<< 7;
924 state
->state
= EF100_TLV_LENGTH
;
926 case EF100_TLV_LENGTH
:
928 /* We only handle TLVs that fit in a u64 */
929 if (state
->len
> sizeof(state
->value
))
931 /* len may be zero, implying a value of zero */
932 state
->state
= state
->len
? EF100_TLV_VALUE
: EF100_TLV_TYPE
;
934 case EF100_TLV_VALUE
:
935 state
->value
|= ((u64
)byte
) << (state
->value_offset
* 8);
936 state
->value_offset
++;
937 if (state
->value_offset
>= state
->len
)
938 state
->state
= EF100_TLV_TYPE
;
940 default: /* state machine error, can't happen */
946 static int ef100_process_design_param(struct efx_nic
*efx
,
947 const struct ef100_tlv_state
*reader
)
949 struct ef100_nic_data
*nic_data
= efx
->nic_data
;
951 switch (reader
->type
) {
952 case ESE_EF100_DP_GZ_PAD
: /* padding, skip it */
954 case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS
:
955 /* Driver doesn't support timestamping yet, so we don't care */
957 case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS
:
958 /* Driver doesn't support unsolicited-event credits yet, so
962 case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE
:
963 /* Driver doesn't manage the NMMU (so we don't care) */
965 case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS
:
966 /* Driver uses CHECKSUM_COMPLETE, so we don't care about
967 * protocol checksum validation
970 case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN
:
971 nic_data
->tso_max_hdr_len
= min_t(u64
, reader
->value
, 0xffff);
973 case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS
:
974 /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
975 if (!reader
->value
) {
976 netif_err(efx
, probe
, efx
->net_dev
,
977 "TSO_MAX_HDR_NUM_SEGS < 1\n");
981 case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY
:
982 case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY
:
983 /* Our TXQ and RXQ sizes are always power-of-two and thus divisible by
984 * EFX_MIN_DMAQ_SIZE, so we just need to check that
985 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
986 * This is very unlikely to fail.
988 if (!reader
->value
|| reader
->value
> EFX_MIN_DMAQ_SIZE
||
989 EFX_MIN_DMAQ_SIZE
% (u32
)reader
->value
) {
990 netif_err(efx
, probe
, efx
->net_dev
,
991 "%s size granularity is %llu, can't guarantee safety\n",
992 reader
->type
== ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY
? "RXQ" : "TXQ",
997 case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN
:
998 nic_data
->tso_max_payload_len
= min_t(u64
, reader
->value
, GSO_MAX_SIZE
);
999 efx
->net_dev
->gso_max_size
= nic_data
->tso_max_payload_len
;
1001 case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS
:
1002 nic_data
->tso_max_payload_num_segs
= min_t(u64
, reader
->value
, 0xffff);
1003 efx
->net_dev
->gso_max_segs
= nic_data
->tso_max_payload_num_segs
;
1005 case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES
:
1006 nic_data
->tso_max_frames
= min_t(u64
, reader
->value
, 0xffff);
1008 case ESE_EF100_DP_GZ_COMPAT
:
1009 if (reader
->value
) {
1010 netif_err(efx
, probe
, efx
->net_dev
,
1011 "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
1016 case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN
:
1017 /* Driver doesn't use mem2mem transfers */
1019 case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS
:
1020 /* Driver doesn't currently use EVQ_TIMER */
1022 case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES
:
1023 /* Driver doesn't manage the NMMU (so we don't care) */
1025 case ESE_EF100_DP_GZ_VI_STRIDES
:
1026 /* We never try to set the VI stride, and we don't rely on
1027 * being able to find VIs past VI 0 until after we've learned
1028 * the current stride from MC_CMD_GET_CAPABILITIES.
1029 * So the value of this shouldn't matter.
1031 if (reader
->value
!= ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT
)
1032 netif_dbg(efx
, probe
, efx
->net_dev
,
1033 "NIC has other than default VI_STRIDES (mask "
1034 "%#llx), early probing might use wrong one\n",
1037 case ESE_EF100_DP_GZ_RX_MAX_RUNT
:
1038 /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
1039 * care whether it indicates runt or overlength for any given
1040 * packet, so we don't care about this parameter.
1044 /* Host interface says "Drivers should ignore design parameters
1045 * that they do not recognise."
1047 netif_dbg(efx
, probe
, efx
->net_dev
,
1048 "Ignoring unrecognised design parameter %u\n",
1054 static int ef100_check_design_params(struct efx_nic
*efx
)
1056 struct ef100_tlv_state reader
= {};
1057 u32 total_len
, offset
= 0;
1062 efx_readd(efx
, ®
, ER_GZ_PARAMS_TLV_LEN
);
1063 total_len
= EFX_DWORD_FIELD(reg
, EFX_DWORD_0
);
1064 netif_dbg(efx
, probe
, efx
->net_dev
, "%u bytes of design parameters\n",
1066 while (offset
< total_len
) {
1067 efx_readd(efx
, ®
, ER_GZ_PARAMS_TLV
+ offset
);
1068 data
= EFX_DWORD_FIELD(reg
, EFX_DWORD_0
);
1069 for (i
= 0; i
< sizeof(data
); i
++) {
1070 rc
= ef100_tlv_feed(&reader
, data
);
1071 /* Got a complete value? */
1072 if (!rc
&& reader
.state
== EF100_TLV_TYPE
)
1073 rc
= ef100_process_design_param(efx
, &reader
);
1080 /* Check we didn't end halfway through a TLV entry, which could either
1081 * mean that the TLV stream is truncated or just that it's corrupted
1082 * and our state machine is out of sync.
1084 if (reader
.state
!= EF100_TLV_TYPE
) {
1085 if (reader
.state
== EF100_TLV_TYPE_CONT
)
1086 netif_err(efx
, probe
, efx
->net_dev
,
1087 "truncated design parameter (incomplete type %u)\n",
1090 netif_err(efx
, probe
, efx
->net_dev
,
1091 "truncated design parameter %u\n",
/*	NIC probe and remove
 */
/* Shared NIC probe path for PF and VF functions: validate the BAR, read
 * design parameters, bring up MCDI, reset the function, query capabilities,
 * probe the PHY and filter table, then register the net device.
 */
static int ef100_probe_main(struct efx_nic *efx)
{
	unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
	struct net_device *net_dev = efx->net_dev;
	struct ef100_nic_data *nic_data;
	char fw_version[32];
	int i, rc;

	/* A zero-sized BAR means the device isn't usable at all */
	if (WARN_ON(bar_size == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;
	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;
	net_dev->hw_enc_features |= efx->type->offload_features;
	net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
				  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;

	/* Populate design-parameter defaults; ef100_check_design_params()
	 * below may override them with values read from the hardware.
	 */
	nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
	nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
	nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
	nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
	net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
	/* Read design parameters */
	rc = ef100_check_design_params(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unsupported design parameters\n");
		goto fail;
	}

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	/* MCDI buffers must be 256 byte aligned. */
	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
				  GFP_KERNEL);
	if (rc)
		goto fail;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = ef100_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		/* Give the MC a few seconds to come out of reboot */
		if (++i == 5)
			goto fail;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));

	/* Post-IO section. */

	rc = efx_mcdi_init(efx);
	/* A function with no active port is of no use to this driver */
	if (!rc && efx->mcdi->fn_flags &
		   (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		netif_info(efx, probe, efx->net_dev,
			   "No network port on this PCI function");
		rc = -ENODEV;
	}
	if (rc)
		goto fail;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail;
	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail;

	rc = efx_get_pf_index(efx, &nic_data->pf_index);
	if (rc)
		goto fail;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	efx->max_vis = EF100_MAX_VIS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail;
	efx->port_num = rc;

	efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
	netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);

	/* Firmware older than 1.1.0.1000 uses event-descriptor formats this
	 * driver does not handle; refuse to attach.
	 */
	if (compare_versions(fw_version, "1.1.0.1000") < 0) {
		netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
		rc = -EINVAL;
		goto fail;
	}

	/* Likewise unsolicited-event credits are not supported here */
	if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
		netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
		rc = -EINVAL;
		goto fail;
	}

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	/* filter_sem guards the filter table against concurrent access
	 * while it is being created.
	 */
	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	return 0;
fail:
	/* NOTE(review): no unwinding here; partially-initialised state is
	 * presumably torn down by the caller via ef100_remove() — confirm.
	 */
	return rc;
}
1242 int ef100_probe_pf(struct efx_nic
*efx
)
1244 struct net_device
*net_dev
= efx
->net_dev
;
1245 struct ef100_nic_data
*nic_data
;
1246 int rc
= ef100_probe_main(efx
);
1251 nic_data
= efx
->nic_data
;
1252 rc
= ef100_get_mac_address(efx
, net_dev
->perm_addr
);
1255 /* Assign MAC address */
1256 memcpy(net_dev
->dev_addr
, net_dev
->perm_addr
, ETH_ALEN
);
1257 memcpy(nic_data
->port_id
, net_dev
->perm_addr
, ETH_ALEN
);
/* Probe a VF: no VF-specific setup is needed beyond the common path. */
int ef100_probe_vf(struct efx_nic *efx)
{
	return ef100_probe_main(efx);
}
/* Tear down everything set up by ef100_probe_main()/ef100_probe_pf(),
 * in reverse of probe order.
 */
void ef100_remove(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	ef100_unregister_netdev(efx);

	/* filter_sem guards the filter table against concurrent readers
	 * while it is destroyed.
	 */
	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;
	efx_mcdi_detach(efx);
	efx_mcdi_fini(efx);
	/* nic_data may be NULL if ef100_probe_main() failed very early */
	if (nic_data)
		efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
	efx->nic_data = NULL;
}