]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/sfc/falcon.c
sfc: Make most filter operations NIC-type-specific
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / sfc / falcon.c
CommitLineData
8ceee660
BH
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
0a6f40c6 4 * Copyright 2006-2010 Solarflare Communications Inc.
8ceee660
BH
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/bitops.h>
12#include <linux/delay.h>
13#include <linux/pci.h>
14#include <linux/module.h>
15#include <linux/seq_file.h>
37b5a603 16#include <linux/i2c.h>
f31a45d2 17#include <linux/mii.h>
5a0e3ad6 18#include <linux/slab.h>
8ceee660
BH
19#include "net_driver.h"
20#include "bitfield.h"
21#include "efx.h"
8ceee660 22#include "spi.h"
744093c9 23#include "nic.h"
8b8a95a1 24#include "farch_regs.h"
12d00cad 25#include "io.h"
8ceee660 26#include "phy.h"
8ceee660 27#include "workarounds.h"
d4f2cecc 28#include "selftest.h"
ab0115fc 29#include "mdio_10g.h"
8ceee660 30
8986352a 31/* Hardware control for SFC4000 (aka Falcon). */
8ceee660 32
ab0115fc
BH
33/**************************************************************************
34 *
35 * MAC stats DMA format
36 *
37 **************************************************************************
38 */
39
40#define FALCON_MAC_STATS_SIZE 0x100
41
42#define XgRxOctets_offset 0x0
43#define XgRxOctets_WIDTH 48
44#define XgRxOctetsOK_offset 0x8
45#define XgRxOctetsOK_WIDTH 48
46#define XgRxPkts_offset 0x10
47#define XgRxPkts_WIDTH 32
48#define XgRxPktsOK_offset 0x14
49#define XgRxPktsOK_WIDTH 32
50#define XgRxBroadcastPkts_offset 0x18
51#define XgRxBroadcastPkts_WIDTH 32
52#define XgRxMulticastPkts_offset 0x1C
53#define XgRxMulticastPkts_WIDTH 32
54#define XgRxUnicastPkts_offset 0x20
55#define XgRxUnicastPkts_WIDTH 32
56#define XgRxUndersizePkts_offset 0x24
57#define XgRxUndersizePkts_WIDTH 32
58#define XgRxOversizePkts_offset 0x28
59#define XgRxOversizePkts_WIDTH 32
60#define XgRxJabberPkts_offset 0x2C
61#define XgRxJabberPkts_WIDTH 32
62#define XgRxUndersizeFCSerrorPkts_offset 0x30
63#define XgRxUndersizeFCSerrorPkts_WIDTH 32
64#define XgRxDropEvents_offset 0x34
65#define XgRxDropEvents_WIDTH 32
66#define XgRxFCSerrorPkts_offset 0x38
67#define XgRxFCSerrorPkts_WIDTH 32
68#define XgRxAlignError_offset 0x3C
69#define XgRxAlignError_WIDTH 32
70#define XgRxSymbolError_offset 0x40
71#define XgRxSymbolError_WIDTH 32
72#define XgRxInternalMACError_offset 0x44
73#define XgRxInternalMACError_WIDTH 32
74#define XgRxControlPkts_offset 0x48
75#define XgRxControlPkts_WIDTH 32
76#define XgRxPausePkts_offset 0x4C
77#define XgRxPausePkts_WIDTH 32
78#define XgRxPkts64Octets_offset 0x50
79#define XgRxPkts64Octets_WIDTH 32
80#define XgRxPkts65to127Octets_offset 0x54
81#define XgRxPkts65to127Octets_WIDTH 32
82#define XgRxPkts128to255Octets_offset 0x58
83#define XgRxPkts128to255Octets_WIDTH 32
84#define XgRxPkts256to511Octets_offset 0x5C
85#define XgRxPkts256to511Octets_WIDTH 32
86#define XgRxPkts512to1023Octets_offset 0x60
87#define XgRxPkts512to1023Octets_WIDTH 32
88#define XgRxPkts1024to15xxOctets_offset 0x64
89#define XgRxPkts1024to15xxOctets_WIDTH 32
90#define XgRxPkts15xxtoMaxOctets_offset 0x68
91#define XgRxPkts15xxtoMaxOctets_WIDTH 32
92#define XgRxLengthError_offset 0x6C
93#define XgRxLengthError_WIDTH 32
94#define XgTxPkts_offset 0x80
95#define XgTxPkts_WIDTH 32
96#define XgTxOctets_offset 0x88
97#define XgTxOctets_WIDTH 48
98#define XgTxMulticastPkts_offset 0x90
99#define XgTxMulticastPkts_WIDTH 32
100#define XgTxBroadcastPkts_offset 0x94
101#define XgTxBroadcastPkts_WIDTH 32
102#define XgTxUnicastPkts_offset 0x98
103#define XgTxUnicastPkts_WIDTH 32
104#define XgTxControlPkts_offset 0x9C
105#define XgTxControlPkts_WIDTH 32
106#define XgTxPausePkts_offset 0xA0
107#define XgTxPausePkts_WIDTH 32
108#define XgTxPkts64Octets_offset 0xA4
109#define XgTxPkts64Octets_WIDTH 32
110#define XgTxPkts65to127Octets_offset 0xA8
111#define XgTxPkts65to127Octets_WIDTH 32
112#define XgTxPkts128to255Octets_offset 0xAC
113#define XgTxPkts128to255Octets_WIDTH 32
114#define XgTxPkts256to511Octets_offset 0xB0
115#define XgTxPkts256to511Octets_WIDTH 32
116#define XgTxPkts512to1023Octets_offset 0xB4
117#define XgTxPkts512to1023Octets_WIDTH 32
118#define XgTxPkts1024to15xxOctets_offset 0xB8
119#define XgTxPkts1024to15xxOctets_WIDTH 32
120#define XgTxPkts1519toMaxOctets_offset 0xBC
121#define XgTxPkts1519toMaxOctets_WIDTH 32
122#define XgTxUndersizePkts_offset 0xC0
123#define XgTxUndersizePkts_WIDTH 32
124#define XgTxOversizePkts_offset 0xC4
125#define XgTxOversizePkts_WIDTH 32
126#define XgTxNonTcpUdpPkt_offset 0xC8
127#define XgTxNonTcpUdpPkt_WIDTH 16
128#define XgTxMacSrcErrPkt_offset 0xCC
129#define XgTxMacSrcErrPkt_WIDTH 16
130#define XgTxIpSrcErrPkt_offset 0xD0
131#define XgTxIpSrcErrPkt_WIDTH 16
132#define XgDmaDone_offset 0xD4
133#define XgDmaDone_WIDTH 32
134
135#define FALCON_STATS_NOT_DONE 0x00000000
136#define FALCON_STATS_DONE 0xffffffff
137
138#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
139#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
140
/* Retrieve statistic from statistics block.
 *
 * Reads the DMAed MAC statistic named @falcon_stat out of
 * efx->stats_buffer and accumulates it into efx->mac_stats.@efx_stat.
 * The access width (16/32/64 bits) is selected at compile time from the
 * Xg*_WIDTH define; 48-bit counters fall through to the 64-bit case.
 * Values are little-endian as DMAed by the hardware. */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
			*((__force __le16 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
			*((__force __le32 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	else							\
		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
			*((__force __le64 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat))));	\
	} while (0)
159
160/**************************************************************************
161 *
162 * Non-volatile configuration
163 *
164 **************************************************************************
165 */
166
/* Board configuration v2 (v1 is obsolete; later versions are compatible).
 * Layout of the per-board section of the NVRAM configuration; all
 * multi-byte fields are little-endian and the struct is unpadded. */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of ports on the board */
	u8 port0_phy_addr;		/* MDIO address of port 0 PHY */
	u8 port0_phy_type;		/* PHY_TYPE_* for port 0 */
	u8 port1_phy_addr;		/* MDIO address of port 1 PHY */
	u8 port1_phy_type;		/* PHY_TYPE_* for port 1 */
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;
177
/* Board configuration v3 extra information.
 * One encoded SPI device descriptor per device (flash, EEPROM); decode
 * the fields with SPI_DEV_TYPE_FIELD() and the SPI_DEV_TYPE_* bit
 * numbers below. */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;
182
183/* Bit numbers for spi_device_type */
184#define SPI_DEV_TYPE_SIZE_LBN 0
185#define SPI_DEV_TYPE_SIZE_WIDTH 5
186#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
187#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
188#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
189#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
190#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
191#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
192#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
193#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
194#define SPI_DEV_TYPE_FIELD(type, field) \
195 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
196
197#define FALCON_NVCONFIG_OFFSET 0x300
198
199#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* NVRAM configuration image, starting at FALCON_NVCONFIG_OFFSET (0x300)
 * in the boot device.  Hex comments give the absolute offset of each
 * field; board_magic_num must equal FALCON_NVCONFIG_BOARD_MAGIC_NUM for
 * the board section to be valid. */
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];				/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
218
219/*************************************************************************/
220
d4f2cecc 221static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
ab0115fc 222static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
d4f2cecc 223
2f7f5730
BH
/* Pre-encoded spi_device_type words (see SPI_DEV_TYPE_* bit numbers)
 * used when the NVRAM does not describe the SPI devices itself. */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
237
8ceee660
BH
238/**************************************************************************
239 *
240 * I2C bus - this is a bit-bashing interface using GPIO pins
241 * Note that it uses the output enables to tristate the outputs
242 * SDA is the data pin and SCL is the clock
243 *
244 **************************************************************************
245 */
/* i2c-algo-bit callback: drive the I2C SDA line (GPIO3).
 * The line is open-drain: clearing the output enable tristates the pin
 * (pulled high externally), so OEN is written with the inverse of
 * @state. */
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
255
/* i2c-algo-bit callback: drive the I2C SCL line (GPIO0).
 * Open-drain like SDA: OEN is the inverse of the desired line state. */
static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}
265
8e730c15
BH
/* i2c-algo-bit callback: sample the I2C SDA line (GPIO3 input). */
static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}
8ceee660 274
8e730c15
BH
/* i2c-algo-bit callback: sample the I2C SCL line (GPIO0 input),
 * allowing the core to detect clock stretching by a slave. */
static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}
283
/* Bit-banging I2C algorithm bound to the GPIO helpers above.
 * udelay = 5 us gives roughly a 100 kHz bus clock. */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
293
/* Program the channel's event-timer register with its current interrupt
 * moderation setting.  A non-zero moderation selects interrupt hold-off
 * mode with (ticks - 1) as the count; zero disables the timer. */
static void falcon_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	/* The A/A and B/Z register addresses must agree because we index
	 * by channel below regardless of revision. */
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
316
d3245b28
BH
317static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
318
127e6e10
BH
/* Prepare for a TX/RX queue flush: detach the MAC wrapper, then give
 * the FIFOs time to drain before the flush proper begins. */
static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}
328
8ceee660
BH
329/* Acknowledge a legacy interrupt from Falcon
330 *
331 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
332 *
333 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
334 * BIU. Interrupt acknowledge is read sensitive so must write instead
335 * (then read to ensure the BIU collector is flushed)
336 *
337 * NB most hardware supports MSI interrupts
338 */
/* Acknowledge a legacy interrupt on Falcon rev A1.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated
 * in the BIU.  Interrupt acknowledge is read sensitive so we must write
 * the magic value instead, then read back to ensure the BIU collector
 * is flushed. */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
347
8ceee660 348
/* Legacy (INTx) interrupt handler for Falcon rev A1.
 *
 * The hardware DMAs an interrupt vector into efx->irq_status before
 * raising the line; a zero vector means the interrupt is not ours.
 * Handles fatal errors, then schedules NAPI on the (at most two)
 * event queues flagged in the vector.
 */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Interrupts are soft-disabled (e.g. during reset): claim the IRQ
	 * but do no work. */
	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Rev A1 supports only two event queues via legacy interrupts. */
	if (queues & 1)
		efx_schedule_channel_irq(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel_irq(efx_get_channel(efx, 1));
	return IRQ_HANDLED;
}
8ceee660
BH
393/**************************************************************************
394 *
395 * EEPROM/flash
396 *
397 **************************************************************************
398 */
399
23d30f02 400#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
8ceee660 401
be4ea89c
BH
/* Poll the SPI host command register once.
 * Returns 0 if the previous command has completed, -EBUSY if the
 * command-enable bit is still set. */
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
408
8ceee660
BH
/* Wait for SPI command completion.
 * Returns 0 on completion or -ETIMEDOUT after ~100 ms. */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	/* Fast path: busy-wait for up to ~100 us before sleeping. */
	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	/* Slow path: sleep between polls until the deadline. */
	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
436
/* Issue a single command to a SPI device behind the Falcon
 * EEPROM/flash controller.
 *
 * @spi:	 descriptor of the target SPI device
 * @command:	 SPI command byte
 * @address:	 device address for addressed commands, or negative for
 *		 commands with no address phase
 * @in:		 data to write after the address, or NULL
 * @out:	 buffer for data read back, or NULL
 * @len:	 data length in bytes, at most FALCON_SPI_MAX_LEN
 *
 * Returns 0 on success or a negative error code.  Must not be called
 * while a previous command is still in flight.
 */
int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
492
23d30f02
BH
/* Maximum number of bytes that may be written starting at @start without
 * crossing a device write-block boundary or exceeding the controller's
 * FALCON_SPI_MAX_LEN limit.  spi->block_size is assumed to be a power
 * of two (the mask arithmetic relies on it). */
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
499
500static inline u8
501efx_spi_munge_command(const struct efx_spi_device *spi,
502 const u8 command, const unsigned int address)
503{
504 return command | (((address >> 8) & spi->munge_address) << 3);
505}
506
/* Wait up to 10 ms for buffered write completion.
 * Polls the device's status register (RDSR) until the not-ready bit
 * clears.  Returns 0 on completion, -ETIMEDOUT on timeout, or the
 * error from a failed status read. */
int
falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
532
76884835
BH
/* Read @len bytes from a SPI device starting at @start into @buffer.
 * Transfers are split into FALCON_SPI_MAX_LEN chunks.  On return
 * *@retlen (if non-NULL) holds the number of bytes actually read, which
 * may be short on error or -EINTR. */
int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
		    loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
562
76884835
BH
563int
564falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
565 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
4a5b504d
BH
566{
567 u8 verify_buffer[FALCON_SPI_MAX_LEN];
23d30f02
BH
568 size_t block_len, pos = 0;
569 unsigned int command;
4a5b504d
BH
570 int rc = 0;
571
572 while (pos < len) {
76884835 573 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
4a5b504d
BH
574 if (rc)
575 break;
576
23d30f02 577 block_len = min(len - pos,
4a5b504d
BH
578 falcon_spi_write_limit(spi, start + pos));
579 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
76884835 580 rc = falcon_spi_cmd(efx, spi, command, start + pos,
4a5b504d
BH
581 buffer + pos, NULL, block_len);
582 if (rc)
583 break;
584
76884835 585 rc = falcon_spi_wait_write(efx, spi);
4a5b504d
BH
586 if (rc)
587 break;
588
589 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
76884835 590 rc = falcon_spi_cmd(efx, spi, command, start + pos,
4a5b504d
BH
591 NULL, verify_buffer, block_len);
592 if (memcmp(verify_buffer, buffer + pos, block_len)) {
593 rc = -EIO;
594 break;
595 }
596
597 pos += block_len;
598
599 /* Avoid locking up the system */
600 cond_resched();
601 if (signal_pending(current)) {
602 rc = -EINTR;
603 break;
604 }
605 }
606
607 if (retlen)
608 *retlen = pos;
609 return rc;
610}
611
ab0115fc
BH
612/**************************************************************************
613 *
614 * XMAC operations
615 *
616 **************************************************************************
617 */
618
/* Configure the XAUI driver that is an output from Falcon.
 * Sets the serdes drive strength and TX driver de-emphasis for all four
 * XAUI lanes to their low-power defaults. */
static void falcon_setup_xaui(struct efx_nic *efx)
{
	efx_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	EFX_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
651
/* Reset the XAUI/XGXS block and reinitialise the XAUI drivers.
 * Caller must have stopped MAC statistics fetches first (we assert
 * stats_disable_count != 0).  Returns 0 on success or -ETIMEDOUT if the
 * reset does not complete within ~10 ms. */
int falcon_reset_xaui(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
679
/* Acknowledge the XM management interrupt by reading the mask register.
 * Only meaningful on B0 silicon outside internal loopback, with the
 * wireside link up and no XMAC poll pending. */
static void falcon_ack_status_intr(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	/* The read itself acknowledges/clears the pending condition. */
	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
699
/* Check the XGXS side of the link: true iff lane alignment is done and
 * all four lanes report sync.  Also clears the latched comma-detect,
 * character-error and disparity-error status so the next read starts
 * fresh. */
static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}
722
/* Overall XMAC link status: MAC-side XGXS state combined with the PHY's
 * XGXS lane sync where applicable. */
static bool falcon_xmac_link_ok(struct efx_nic *efx)
{
	/*
	 * Check MAC's XGXS link status except when using XGMII loopback
	 * which bypasses the XGXS block.
	 * If possible, check PHY's XGXS link status except when using
	 * MAC loopback.
	 */
	return (efx->loopback_mode == LOOPBACK_XGMII ||
		falcon_xgxs_link_ok(efx)) &&
		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
		 LOOPBACK_INTERNAL(efx) ||
		 efx_mdio_phyxgxs_lane_sync(efx));
}
737
/* Program the XMAC core: global config, TX/RX enables, flow control,
 * maximum frame length (derived from the current MTU) and the station
 * MAC address. */
static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address: low 4 bytes then high 2 bytes, each written
	 * via a full oword register. */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
791
/* Program the XGXS core loopback configuration to match the current
 * efx->loopback_mode, resetting the XAUI block if the loopback state is
 * changing (the XGXS block is flaky across such transitions). */
static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into our out of XGMII, XGXS or XAUI loopbacks. */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* XAUI loopback is set per-lane (A-D) in the serdes control reg */
	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
830
831
/* Try to bring up the Falcon side of the Falcon-Phy XAUI link.
 * Retries up to @tries XAUI resets while the link reports down, with
 * statistics fetches stopped around the resets.  Returns the final link
 * state; in disabled PHY modes or wire-side external loopbacks the link
 * is expected down and no resets are attempted. */
static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
	bool mac_up = falcon_xmac_link_ok(efx);

	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
	    efx_phy_mode_disabled(efx->phy_mode))
		/* XAUI link is expected to be down */
		return mac_up;

	falcon_stop_nic_stats(efx);

	while (!mac_up && tries) {
		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
		falcon_reset_xaui(efx);
		udelay(200);

		mac_up = falcon_xmac_link_ok(efx);
		--tries;
	}

	falcon_start_nic_stats(efx);

	return mac_up;
}
857
858static bool falcon_xmac_check_fault(struct efx_nic *efx)
859{
860 return !falcon_xmac_link_ok_retry(efx, 5);
861}
862
/* Full XMAC reconfiguration: XGXS core, XMAC core and MAC wrapper, then
 * attempt link bring-up (up to 5 retries) and record whether further
 * polling is needed.  Always returns 0. */
static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link didn't come up we must poll for the positive edge
	 * of xaui_align ourselves (see falcon_ack_status_intr). */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
877
/* Accumulate DMAed XMAC hardware statistics into efx->mac_stats and
 * refresh the derived good-byte/bad-byte counters.  Caller is expected
 * to have arranged for a completed stats DMA into efx->stats_buffer. */
static void falcon_update_stats_xmac(struct efx_nic *efx)
{
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Update MAC stats from DMAed values */
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
	FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
	FALCON_STAT(efx, XgRxPkts, rx_packets);
	FALCON_STAT(efx, XgRxPktsOK, rx_good);
	FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
	FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
	FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
	FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
	FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
	FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
	FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
	FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
	FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
	FALCON_STAT(efx, XgRxAlignError, rx_align_error);
	FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
	FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
	FALCON_STAT(efx, XgRxControlPkts, rx_control);
	FALCON_STAT(efx, XgRxPausePkts, rx_pause);
	FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
	FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
	FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
	FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
	FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
	FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
	FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
	FALCON_STAT(efx, XgRxLengthError, rx_length_error);
	FALCON_STAT(efx, XgTxPkts, tx_packets);
	FALCON_STAT(efx, XgTxOctets, tx_bytes);
	FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
	FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
	FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
	FALCON_STAT(efx, XgTxControlPkts, tx_control);
	FALCON_STAT(efx, XgTxPausePkts, tx_pause);
	FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
	FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
	FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
	FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
	FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
	FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
	FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
	FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
	FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
	FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
	FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);

	/* Update derived statistics (control frames are 64 bytes each) */
	efx_update_diff_stat(&mac_stats->tx_good_bytes,
			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
			     mac_stats->tx_control * 64);
	efx_update_diff_stat(&mac_stats->rx_bad_bytes,
			     mac_stats->rx_bytes - mac_stats->rx_good_bytes -
			     mac_stats->rx_control * 64);
}
937
938static void falcon_poll_xmac(struct efx_nic *efx)
939{
940 struct falcon_nic_data *nic_data = efx->nic_data;
941
ab3b8250
BH
942 /* We expect xgmii faults if the wireside link is down */
943 if (!efx->link_state.up || !nic_data->xmac_poll_required)
ab0115fc
BH
944 return;
945
946 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
947 falcon_ack_status_intr(efx);
948}
949
8ceee660
BH
950/**************************************************************************
951 *
952 * MAC wrapper
953 *
954 **************************************************************************
955 */
177dfcd8 956
ef2b90ee
BH
957static void falcon_push_multicast_hash(struct efx_nic *efx)
958{
959 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
960
961 WARN_ON(!mutex_is_locked(&efx->mac_lock));
962
963 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
964 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
965}
966
d3245b28 967static void falcon_reset_macs(struct efx_nic *efx)
8ceee660 968{
d3245b28
BH
969 struct falcon_nic_data *nic_data = efx->nic_data;
970 efx_oword_t reg, mac_ctrl;
8ceee660
BH
971 int count;
972
daeda630 973 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
177dfcd8
BH
974 /* It's not safe to use GLB_CTL_REG to reset the
975 * macs, so instead use the internal MAC resets
976 */
8fbca791
BH
977 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
978 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
979
980 for (count = 0; count < 10000; count++) {
981 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
982 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
983 0)
984 return;
985 udelay(10);
177dfcd8 986 }
8fbca791
BH
987
988 netif_err(efx, hw, efx->net_dev,
989 "timed out waiting for XMAC core reset\n");
177dfcd8 990 }
8ceee660 991
d3245b28
BH
992 /* Mac stats will fail whist the TX fifo is draining */
993 WARN_ON(nic_data->stats_disable_count == 0);
8ceee660 994
d3245b28
BH
995 efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
996 EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
997 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
8ceee660 998
12d00cad 999 efx_reado(efx, &reg, FR_AB_GLB_CTL);
3e6c4538
BH
1000 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1001 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1002 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
12d00cad 1003 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
8ceee660
BH
1004
1005 count = 0;
1006 while (1) {
12d00cad 1007 efx_reado(efx, &reg, FR_AB_GLB_CTL);
3e6c4538
BH
1008 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1009 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1010 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
62776d03
BH
1011 netif_dbg(efx, hw, efx->net_dev,
1012 "Completed MAC reset after %d loops\n",
1013 count);
8ceee660
BH
1014 break;
1015 }
1016 if (count > 20) {
62776d03 1017 netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
8ceee660
BH
1018 break;
1019 }
1020 count++;
1021 udelay(10);
1022 }
1023
d3245b28
BH
1024 /* Ensure the correct MAC is selected before statistics
1025 * are re-enabled by the caller */
1026 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
b7b40eeb 1027
b7b40eeb 1028 falcon_setup_xaui(efx);
177dfcd8
BH
1029}
1030
9dd3a13b 1031static void falcon_drain_tx_fifo(struct efx_nic *efx)
177dfcd8
BH
1032{
1033 efx_oword_t reg;
1034
daeda630 1035 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
177dfcd8
BH
1036 (efx->loopback_mode != LOOPBACK_NONE))
1037 return;
1038
12d00cad 1039 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
177dfcd8 1040 /* There is no point in draining more than once */
3e6c4538 1041 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
177dfcd8
BH
1042 return;
1043
1044 falcon_reset_macs(efx);
8ceee660
BH
1045}
1046
d3245b28 1047static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
8ceee660 1048{
177dfcd8 1049 efx_oword_t reg;
8ceee660 1050
daeda630 1051 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
8ceee660
BH
1052 return;
1053
1054 /* Isolate the MAC -> RX */
12d00cad 1055 efx_reado(efx, &reg, FR_AZ_RX_CFG);
3e6c4538 1056 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
12d00cad 1057 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
8ceee660 1058
d3245b28
BH
1059 /* Isolate TX -> MAC */
1060 falcon_drain_tx_fifo(efx);
8ceee660
BH
1061}
1062
ab0115fc 1063static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
8ceee660 1064{
eb50c0d6 1065 struct efx_link_state *link_state = &efx->link_state;
8ceee660 1066 efx_oword_t reg;
fd371e32
SH
1067 int link_speed, isolate;
1068
a7d529ae 1069 isolate = !!ACCESS_ONCE(efx->reset_pending);
8ceee660 1070
eb50c0d6 1071 switch (link_state->speed) {
f31a45d2
BH
1072 case 10000: link_speed = 3; break;
1073 case 1000: link_speed = 2; break;
1074 case 100: link_speed = 1; break;
1075 default: link_speed = 0; break;
1076 }
8ceee660
BH
1077 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1078 * as advertised. Disable to ensure packets are not
1079 * indefinitely held and TX queue can be flushed at any point
1080 * while the link is down. */
1081 EFX_POPULATE_OWORD_5(reg,
3e6c4538
BH
1082 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1083 FRF_AB_MAC_BCAD_ACPT, 1,
1084 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1085 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1086 FRF_AB_MAC_SPEED, link_speed);
8ceee660
BH
1087 /* On B0, MAC backpressure can be disabled and packets get
1088 * discarded. */
daeda630 1089 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
3e6c4538 1090 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
fd371e32 1091 !link_state->up || isolate);
8ceee660
BH
1092 }
1093
12d00cad 1094 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
8ceee660
BH
1095
1096 /* Restore the multicast hash registers. */
8be4f3e6 1097 falcon_push_multicast_hash(efx);
8ceee660 1098
12d00cad 1099 efx_reado(efx, &reg, FR_AZ_RX_CFG);
4b0d29dc
BH
1100 /* Enable XOFF signal from RX FIFO (we enabled it during NIC
1101 * initialisation but it may read back as 0) */
1102 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
8ceee660 1103 /* Unisolate the MAC -> RX */
daeda630 1104 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
fd371e32 1105 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
12d00cad 1106 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
8ceee660
BH
1107}
1108
55edc6e6 1109static void falcon_stats_request(struct efx_nic *efx)
8ceee660 1110{
55edc6e6 1111 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 1112 efx_oword_t reg;
8ceee660 1113
55edc6e6
BH
1114 WARN_ON(nic_data->stats_pending);
1115 WARN_ON(nic_data->stats_disable_count);
8ceee660 1116
55edc6e6
BH
1117 if (nic_data->stats_dma_done == NULL)
1118 return; /* no mac selected */
8ceee660 1119
55edc6e6
BH
1120 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
1121 nic_data->stats_pending = true;
8ceee660
BH
1122 wmb(); /* ensure done flag is clear */
1123
1124 /* Initiate DMA transfer of stats */
1125 EFX_POPULATE_OWORD_2(reg,
3e6c4538
BH
1126 FRF_AB_MAC_STAT_DMA_CMD, 1,
1127 FRF_AB_MAC_STAT_DMA_ADR,
8ceee660 1128 efx->stats_buffer.dma_addr);
12d00cad 1129 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
8ceee660 1130
55edc6e6
BH
1131 mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
1132}
1133
1134static void falcon_stats_complete(struct efx_nic *efx)
1135{
1136 struct falcon_nic_data *nic_data = efx->nic_data;
1137
1138 if (!nic_data->stats_pending)
1139 return;
1140
3db1cd5c 1141 nic_data->stats_pending = false;
55edc6e6
BH
1142 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
1143 rmb(); /* read the done flag before the stats */
710b208d 1144 falcon_update_stats_xmac(efx);
55edc6e6 1145 } else {
62776d03
BH
1146 netif_err(efx, hw, efx->net_dev,
1147 "timed out waiting for statistics\n");
8ceee660 1148 }
55edc6e6 1149}
8ceee660 1150
55edc6e6
BH
1151static void falcon_stats_timer_func(unsigned long context)
1152{
1153 struct efx_nic *efx = (struct efx_nic *)context;
1154 struct falcon_nic_data *nic_data = efx->nic_data;
1155
1156 spin_lock(&efx->stats_lock);
1157
1158 falcon_stats_complete(efx);
1159 if (nic_data->stats_disable_count == 0)
1160 falcon_stats_request(efx);
1161
1162 spin_unlock(&efx->stats_lock);
8ceee660
BH
1163}
1164
fdaa9aed
SH
1165static bool falcon_loopback_link_poll(struct efx_nic *efx)
1166{
1167 struct efx_link_state old_state = efx->link_state;
1168
1169 WARN_ON(!mutex_is_locked(&efx->mac_lock));
1170 WARN_ON(!LOOPBACK_INTERNAL(efx));
1171
1172 efx->link_state.fd = true;
1173 efx->link_state.fc = efx->wanted_fc;
1174 efx->link_state.up = true;
8fbca791 1175 efx->link_state.speed = 10000;
fdaa9aed
SH
1176
1177 return !efx_link_state_equal(&efx->link_state, &old_state);
1178}
1179
d3245b28
BH
1180static int falcon_reconfigure_port(struct efx_nic *efx)
1181{
1182 int rc;
1183
1184 WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
1185
1186 /* Poll the PHY link state *before* reconfiguring it. This means we
1187 * will pick up the correct speed (in loopback) to select the correct
1188 * MAC.
1189 */
1190 if (LOOPBACK_INTERNAL(efx))
1191 falcon_loopback_link_poll(efx);
1192 else
1193 efx->phy_op->poll(efx);
1194
1195 falcon_stop_nic_stats(efx);
1196 falcon_deconfigure_mac_wrapper(efx);
1197
8fbca791 1198 falcon_reset_macs(efx);
d3245b28
BH
1199
1200 efx->phy_op->reconfigure(efx);
710b208d 1201 rc = falcon_reconfigure_xmac(efx);
d3245b28
BH
1202 BUG_ON(rc);
1203
1204 falcon_start_nic_stats(efx);
1205
1206 /* Synchronise efx->link_state with the kernel */
1207 efx_link_status_changed(efx);
1208
1209 return 0;
1210}
1211
9dd3a13b
BH
1212/* TX flow control may automatically turn itself off if the link
1213 * partner (intermittently) stops responding to pause frames. There
1214 * isn't any indication that this has happened, so the best we do is
1215 * leave it up to the user to spot this and fix it by cycling transmit
1216 * flow control on this end.
1217 */
1218
1219static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
1220{
1221 /* Schedule a reset to recover */
1222 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
1223}
1224
/* B0: re-enabling TX flow control only needs an EM block reset, done
 * by draining the TX FIFO and reconfiguring the XMAC.
 */
static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1233
8ceee660
BH
1234/**************************************************************************
1235 *
1236 * PHY access via GMII
1237 *
1238 **************************************************************************
1239 */
1240
8ceee660
BH
1241/* Wait for GMII access to complete */
1242static int falcon_gmii_wait(struct efx_nic *efx)
1243{
80cb9a0f 1244 efx_oword_t md_stat;
8ceee660
BH
1245 int count;
1246
25985edc 1247 /* wait up to 50ms - taken max from datasheet */
177dfcd8 1248 for (count = 0; count < 5000; count++) {
80cb9a0f
BH
1249 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
1250 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
1251 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
1252 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
62776d03
BH
1253 netif_err(efx, hw, efx->net_dev,
1254 "error from GMII access "
1255 EFX_OWORD_FMT"\n",
1256 EFX_OWORD_VAL(md_stat));
8ceee660
BH
1257 return -EIO;
1258 }
1259 return 0;
1260 }
1261 udelay(10);
1262 }
62776d03 1263 netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
8ceee660
BH
1264 return -ETIMEDOUT;
1265}
1266
68e7f45e
BH
1267/* Write an MDIO register of a PHY connected to Falcon. */
1268static int falcon_mdio_write(struct net_device *net_dev,
1269 int prtad, int devad, u16 addr, u16 value)
8ceee660 1270{
767e468c 1271 struct efx_nic *efx = netdev_priv(net_dev);
4833f02a 1272 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 1273 efx_oword_t reg;
68e7f45e 1274 int rc;
8ceee660 1275
62776d03
BH
1276 netif_vdbg(efx, hw, efx->net_dev,
1277 "writing MDIO %d register %d.%d with 0x%04x\n",
68e7f45e 1278 prtad, devad, addr, value);
8ceee660 1279
4833f02a 1280 mutex_lock(&nic_data->mdio_lock);
8ceee660 1281
68e7f45e
BH
1282 /* Check MDIO not currently being accessed */
1283 rc = falcon_gmii_wait(efx);
1284 if (rc)
8ceee660
BH
1285 goto out;
1286
1287 /* Write the address/ID register */
3e6c4538 1288 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
12d00cad 1289 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
8ceee660 1290
3e6c4538
BH
1291 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
1292 FRF_AB_MD_DEV_ADR, devad);
12d00cad 1293 efx_writeo(efx, &reg, FR_AB_MD_ID);
8ceee660
BH
1294
1295 /* Write data */
3e6c4538 1296 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
12d00cad 1297 efx_writeo(efx, &reg, FR_AB_MD_TXD);
8ceee660
BH
1298
1299 EFX_POPULATE_OWORD_2(reg,
3e6c4538
BH
1300 FRF_AB_MD_WRC, 1,
1301 FRF_AB_MD_GC, 0);
12d00cad 1302 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660
BH
1303
1304 /* Wait for data to be written */
68e7f45e
BH
1305 rc = falcon_gmii_wait(efx);
1306 if (rc) {
8ceee660
BH
1307 /* Abort the write operation */
1308 EFX_POPULATE_OWORD_2(reg,
3e6c4538
BH
1309 FRF_AB_MD_WRC, 0,
1310 FRF_AB_MD_GC, 1);
12d00cad 1311 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660
BH
1312 udelay(10);
1313 }
1314
ab867461 1315out:
4833f02a 1316 mutex_unlock(&nic_data->mdio_lock);
68e7f45e 1317 return rc;
8ceee660
BH
1318}
1319
68e7f45e
BH
1320/* Read an MDIO register of a PHY connected to Falcon. */
1321static int falcon_mdio_read(struct net_device *net_dev,
1322 int prtad, int devad, u16 addr)
8ceee660 1323{
767e468c 1324 struct efx_nic *efx = netdev_priv(net_dev);
4833f02a 1325 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 1326 efx_oword_t reg;
68e7f45e 1327 int rc;
8ceee660 1328
4833f02a 1329 mutex_lock(&nic_data->mdio_lock);
8ceee660 1330
68e7f45e
BH
1331 /* Check MDIO not currently being accessed */
1332 rc = falcon_gmii_wait(efx);
1333 if (rc)
8ceee660
BH
1334 goto out;
1335
3e6c4538 1336 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
12d00cad 1337 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
8ceee660 1338
3e6c4538
BH
1339 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
1340 FRF_AB_MD_DEV_ADR, devad);
12d00cad 1341 efx_writeo(efx, &reg, FR_AB_MD_ID);
8ceee660
BH
1342
1343 /* Request data to be read */
3e6c4538 1344 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
12d00cad 1345 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660
BH
1346
1347 /* Wait for data to become available */
68e7f45e
BH
1348 rc = falcon_gmii_wait(efx);
1349 if (rc == 0) {
12d00cad 1350 efx_reado(efx, &reg, FR_AB_MD_RXD);
3e6c4538 1351 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
62776d03
BH
1352 netif_vdbg(efx, hw, efx->net_dev,
1353 "read from MDIO %d register %d.%d, got %04x\n",
1354 prtad, devad, addr, rc);
8ceee660
BH
1355 } else {
1356 /* Abort the read operation */
1357 EFX_POPULATE_OWORD_2(reg,
3e6c4538
BH
1358 FRF_AB_MD_RIC, 0,
1359 FRF_AB_MD_GC, 1);
12d00cad 1360 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660 1361
62776d03
BH
1362 netif_dbg(efx, hw, efx->net_dev,
1363 "read from MDIO %d register %d.%d, got error %d\n",
1364 prtad, devad, addr, rc);
8ceee660
BH
1365 }
1366
ab867461 1367out:
4833f02a 1368 mutex_unlock(&nic_data->mdio_lock);
68e7f45e 1369 return rc;
8ceee660
BH
1370}
1371
8ceee660 1372/* This call is responsible for hooking in the MAC and PHY operations */
ef2b90ee 1373static int falcon_probe_port(struct efx_nic *efx)
8ceee660 1374{
8fbca791 1375 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660
BH
1376 int rc;
1377
96c45726
BH
1378 switch (efx->phy_type) {
1379 case PHY_TYPE_SFX7101:
1380 efx->phy_op = &falcon_sfx7101_phy_ops;
1381 break;
96c45726
BH
1382 case PHY_TYPE_QT2022C2:
1383 case PHY_TYPE_QT2025C:
b37b62fe 1384 efx->phy_op = &falcon_qt202x_phy_ops;
96c45726 1385 break;
7e51b439
BH
1386 case PHY_TYPE_TXC43128:
1387 efx->phy_op = &falcon_txc_phy_ops;
1388 break;
96c45726 1389 default:
62776d03
BH
1390 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
1391 efx->phy_type);
96c45726
BH
1392 return -ENODEV;
1393 }
1394
c1c4f453 1395 /* Fill out MDIO structure and loopback modes */
4833f02a 1396 mutex_init(&nic_data->mdio_lock);
68e7f45e
BH
1397 efx->mdio.mdio_read = falcon_mdio_read;
1398 efx->mdio.mdio_write = falcon_mdio_write;
c1c4f453
BH
1399 rc = efx->phy_op->probe(efx);
1400 if (rc != 0)
1401 return rc;
8ceee660 1402
b895d73e
SH
1403 /* Initial assumption */
1404 efx->link_state.speed = 10000;
1405 efx->link_state.fd = true;
1406
8ceee660 1407 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
daeda630 1408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
04cc8cac 1409 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
8ceee660 1410 else
04cc8cac 1411 efx->wanted_fc = EFX_FC_RX;
7a6b8f6f
SH
1412 if (efx->mdio.mmds & MDIO_DEVS_AN)
1413 efx->wanted_fc |= EFX_FC_AUTO;
8ceee660
BH
1414
1415 /* Allocate buffer for stats */
152b6a62 1416 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
0d19a540 1417 FALCON_MAC_STATS_SIZE, GFP_KERNEL);
8ceee660
BH
1418 if (rc)
1419 return rc;
62776d03
BH
1420 netif_dbg(efx, probe, efx->net_dev,
1421 "stats buffer at %llx (virt %p phys %llx)\n",
1422 (u64)efx->stats_buffer.dma_addr,
1423 efx->stats_buffer.addr,
1424 (u64)virt_to_phys(efx->stats_buffer.addr));
8fbca791 1425 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
8ceee660
BH
1426
1427 return 0;
1428}
1429
ef2b90ee 1430static void falcon_remove_port(struct efx_nic *efx)
8ceee660 1431{
ff3b00a0 1432 efx->phy_op->remove(efx);
152b6a62 1433 efx_nic_free_buffer(efx, &efx->stats_buffer);
8ceee660
BH
1434}
1435
40641ed9
BH
1436/* Global events are basically PHY events */
1437static bool
1438falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
1439{
1440 struct efx_nic *efx = channel->efx;
cef68bde 1441 struct falcon_nic_data *nic_data = efx->nic_data;
40641ed9
BH
1442
1443 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
1444 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
1445 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
1446 /* Ignored */
1447 return true;
1448
1449 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
1450 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
cef68bde 1451 nic_data->xmac_poll_required = true;
40641ed9
BH
1452 return true;
1453 }
1454
1455 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
1456 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
1457 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
1458 netif_err(efx, rx_err, efx->net_dev,
1459 "channel %d seen global RX_RESET event. Resetting.\n",
1460 channel->channel);
1461
1462 atomic_inc(&efx->rx_reset);
1463 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1464 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1465 return true;
1466 }
1467
1468 return false;
1469}
1470
8c8661e4
BH
1471/**************************************************************************
1472 *
1473 * Falcon test code
1474 *
1475 **************************************************************************/
1476
0aa3fbaa
BH
1477static int
1478falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
8c8661e4 1479{
4de92180 1480 struct falcon_nic_data *nic_data = efx->nic_data;
8c8661e4
BH
1481 struct falcon_nvconfig *nvconfig;
1482 struct efx_spi_device *spi;
1483 void *region;
1484 int rc, magic_num, struct_ver;
1485 __le16 *word, *limit;
1486 u32 csum;
1487
4de92180
BH
1488 if (efx_spi_present(&nic_data->spi_flash))
1489 spi = &nic_data->spi_flash;
1490 else if (efx_spi_present(&nic_data->spi_eeprom))
1491 spi = &nic_data->spi_eeprom;
1492 else
2f7f5730
BH
1493 return -EINVAL;
1494
0a95f563 1495 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
8c8661e4
BH
1496 if (!region)
1497 return -ENOMEM;
3e6c4538 1498 nvconfig = region + FALCON_NVCONFIG_OFFSET;
8c8661e4 1499
4de92180 1500 mutex_lock(&nic_data->spi_lock);
76884835 1501 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
4de92180 1502 mutex_unlock(&nic_data->spi_lock);
8c8661e4 1503 if (rc) {
62776d03 1504 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
4de92180
BH
1505 efx_spi_present(&nic_data->spi_flash) ?
1506 "flash" : "EEPROM");
8c8661e4
BH
1507 rc = -EIO;
1508 goto out;
1509 }
1510
1511 magic_num = le16_to_cpu(nvconfig->board_magic_num);
1512 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
1513
1514 rc = -EINVAL;
3e6c4538 1515 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
62776d03
BH
1516 netif_err(efx, hw, efx->net_dev,
1517 "NVRAM bad magic 0x%x\n", magic_num);
8c8661e4
BH
1518 goto out;
1519 }
1520 if (struct_ver < 2) {
62776d03
BH
1521 netif_err(efx, hw, efx->net_dev,
1522 "NVRAM has ancient version 0x%x\n", struct_ver);
8c8661e4
BH
1523 goto out;
1524 } else if (struct_ver < 4) {
1525 word = &nvconfig->board_magic_num;
1526 limit = (__le16 *) (nvconfig + 1);
1527 } else {
1528 word = region;
0a95f563 1529 limit = region + FALCON_NVCONFIG_END;
8c8661e4
BH
1530 }
1531 for (csum = 0; word < limit; ++word)
1532 csum += le16_to_cpu(*word);
1533
1534 if (~csum & 0xffff) {
62776d03
BH
1535 netif_err(efx, hw, efx->net_dev,
1536 "NVRAM has incorrect checksum\n");
8c8661e4
BH
1537 goto out;
1538 }
1539
1540 rc = 0;
1541 if (nvconfig_out)
1542 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
1543
1544 out:
1545 kfree(region);
1546 return rc;
1547}
1548
0aa3fbaa
BH
1549static int falcon_test_nvram(struct efx_nic *efx)
1550{
1551 return falcon_read_nvram(efx, NULL);
1552}
1553
86094f7f 1554static const struct efx_farch_register_test falcon_b0_register_tests[] = {
3e6c4538 1555 { FR_AZ_ADR_REGION,
4cddca54 1556 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
3e6c4538 1557 { FR_AZ_RX_CFG,
8c8661e4 1558 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
3e6c4538 1559 { FR_AZ_TX_CFG,
8c8661e4 1560 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1561 { FR_AZ_TX_RESERVED,
8c8661e4 1562 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
3e6c4538 1563 { FR_AB_MAC_CTRL,
8c8661e4 1564 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1565 { FR_AZ_SRM_TX_DC_CFG,
8c8661e4 1566 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1567 { FR_AZ_RX_DC_CFG,
8c8661e4 1568 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1569 { FR_AZ_RX_DC_PF_WM,
8c8661e4 1570 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1571 { FR_BZ_DP_CTRL,
8c8661e4 1572 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1573 { FR_AB_GM_CFG2,
177dfcd8 1574 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1575 { FR_AB_GMF_CFG0,
177dfcd8 1576 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1577 { FR_AB_XM_GLB_CFG,
8c8661e4 1578 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1579 { FR_AB_XM_TX_CFG,
8c8661e4 1580 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1581 { FR_AB_XM_RX_CFG,
8c8661e4 1582 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1583 { FR_AB_XM_RX_PARAM,
8c8661e4 1584 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1585 { FR_AB_XM_FC,
8c8661e4 1586 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1587 { FR_AB_XM_ADR_LO,
8c8661e4 1588 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1589 { FR_AB_XX_SD_CTL,
8c8661e4
BH
1590 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
1591};
1592
d4f2cecc
BH
1593static int
1594falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
152b6a62 1595{
d4f2cecc
BH
1596 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
1597 int rc, rc2;
1598
1599 mutex_lock(&efx->mac_lock);
1600 if (efx->loopback_modes) {
1601 /* We need the 312 clock from the PHY to test the XMAC
1602 * registers, so move into XGMII loopback if available */
1603 if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
1604 efx->loopback_mode = LOOPBACK_XGMII;
1605 else
1606 efx->loopback_mode = __ffs(efx->loopback_modes);
1607 }
1608 __efx_reconfigure_port(efx);
1609 mutex_unlock(&efx->mac_lock);
1610
1611 efx_reset_down(efx, reset_method);
1612
1613 tests->registers =
86094f7f
BH
1614 efx_farch_test_registers(efx, falcon_b0_register_tests,
1615 ARRAY_SIZE(falcon_b0_register_tests))
d4f2cecc
BH
1616 ? -1 : 1;
1617
1618 rc = falcon_reset_hw(efx, reset_method);
1619 rc2 = efx_reset_up(efx, reset_method, rc == 0);
1620 return rc ? rc : rc2;
152b6a62
BH
1621}
1622
8ceee660
BH
1623/**************************************************************************
1624 *
1625 * Device reset
1626 *
1627 **************************************************************************
1628 */
1629
0e2a9c7c
BH
1630static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1631{
1632 switch (reason) {
1633 case RESET_TYPE_RX_RECOVERY:
1634 case RESET_TYPE_RX_DESC_FETCH:
1635 case RESET_TYPE_TX_DESC_FETCH:
1636 case RESET_TYPE_TX_SKIP:
1637 /* These can occasionally occur due to hardware bugs.
1638 * We try to reset without disrupting the link.
1639 */
1640 return RESET_TYPE_INVISIBLE;
1641 default:
1642 return RESET_TYPE_ALL;
1643 }
1644}
1645
1646static int falcon_map_reset_flags(u32 *flags)
1647{
1648 enum {
1649 FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1650 ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1651 FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1652 FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1653 };
1654
1655 if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1656 *flags &= ~FALCON_RESET_WORLD;
1657 return RESET_TYPE_WORLD;
1658 }
1659
1660 if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1661 *flags &= ~FALCON_RESET_ALL;
1662 return RESET_TYPE_ALL;
1663 }
1664
1665 if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1666 *flags &= ~FALCON_RESET_INVISIBLE;
1667 return RESET_TYPE_INVISIBLE;
1668 }
1669
1670 return -EINVAL;
1671}
1672
8ceee660
BH
1673/* Resets NIC to known state. This routine must be called in process
1674 * context and is allowed to sleep. */
4de92180 1675static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
8ceee660
BH
1676{
1677 struct falcon_nic_data *nic_data = efx->nic_data;
1678 efx_oword_t glb_ctl_reg_ker;
1679 int rc;
1680
62776d03
BH
1681 netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
1682 RESET_TYPE(method));
8ceee660
BH
1683
1684 /* Initiate device reset */
1685 if (method == RESET_TYPE_WORLD) {
1686 rc = pci_save_state(efx->pci_dev);
1687 if (rc) {
62776d03
BH
1688 netif_err(efx, drv, efx->net_dev,
1689 "failed to backup PCI state of primary "
1690 "function prior to hardware reset\n");
8ceee660
BH
1691 goto fail1;
1692 }
152b6a62 1693 if (efx_nic_is_dual_func(efx)) {
8ceee660
BH
1694 rc = pci_save_state(nic_data->pci_dev2);
1695 if (rc) {
62776d03
BH
1696 netif_err(efx, drv, efx->net_dev,
1697 "failed to backup PCI state of "
1698 "secondary function prior to "
1699 "hardware reset\n");
8ceee660
BH
1700 goto fail2;
1701 }
1702 }
1703
1704 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
3e6c4538
BH
1705 FRF_AB_EXT_PHY_RST_DUR,
1706 FFE_AB_EXT_PHY_RST_DUR_10240US,
1707 FRF_AB_SWRST, 1);
8ceee660 1708 } else {
8ceee660 1709 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
3e6c4538
BH
1710 /* exclude PHY from "invisible" reset */
1711 FRF_AB_EXT_PHY_RST_CTL,
1712 method == RESET_TYPE_INVISIBLE,
1713 /* exclude EEPROM/flash and PCIe */
1714 FRF_AB_PCIE_CORE_RST_CTL, 1,
1715 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
1716 FRF_AB_PCIE_SD_RST_CTL, 1,
1717 FRF_AB_EE_RST_CTL, 1,
1718 FRF_AB_EXT_PHY_RST_DUR,
1719 FFE_AB_EXT_PHY_RST_DUR_10240US,
1720 FRF_AB_SWRST, 1);
1721 }
12d00cad 1722 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
8ceee660 1723
62776d03 1724 netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
8ceee660
BH
1725 schedule_timeout_uninterruptible(HZ / 20);
1726
1727 /* Restore PCI configuration if needed */
1728 if (method == RESET_TYPE_WORLD) {
1d3c16a8
JM
1729 if (efx_nic_is_dual_func(efx))
1730 pci_restore_state(nic_data->pci_dev2);
1731 pci_restore_state(efx->pci_dev);
62776d03
BH
1732 netif_dbg(efx, drv, efx->net_dev,
1733 "successfully restored PCI config\n");
8ceee660
BH
1734 }
1735
1736 /* Assert that reset complete */
12d00cad 1737 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
3e6c4538 1738 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
8ceee660 1739 rc = -ETIMEDOUT;
62776d03
BH
1740 netif_err(efx, hw, efx->net_dev,
1741 "timed out waiting for hardware reset\n");
1d3c16a8 1742 goto fail3;
8ceee660 1743 }
62776d03 1744 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
8ceee660
BH
1745
1746 return 0;
1747
1748 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1749fail2:
8ceee660
BH
1750 pci_restore_state(efx->pci_dev);
1751fail1:
1d3c16a8 1752fail3:
8ceee660
BH
1753 return rc;
1754}
1755
4de92180
BH
1756static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1757{
1758 struct falcon_nic_data *nic_data = efx->nic_data;
1759 int rc;
1760
1761 mutex_lock(&nic_data->spi_lock);
1762 rc = __falcon_reset_hw(efx, method);
1763 mutex_unlock(&nic_data->spi_lock);
1764
1765 return rc;
1766}
1767
ef2b90ee 1768static void falcon_monitor(struct efx_nic *efx)
fe75820b 1769{
fdaa9aed 1770 bool link_changed;
fe75820b
BH
1771 int rc;
1772
fdaa9aed
SH
1773 BUG_ON(!mutex_is_locked(&efx->mac_lock));
1774
fe75820b
BH
1775 rc = falcon_board(efx)->type->monitor(efx);
1776 if (rc) {
62776d03
BH
1777 netif_err(efx, hw, efx->net_dev,
1778 "Board sensor %s; shutting down PHY\n",
1779 (rc == -ERANGE) ? "reported fault" : "failed");
fe75820b 1780 efx->phy_mode |= PHY_MODE_LOW_POWER;
d3245b28
BH
1781 rc = __efx_reconfigure_port(efx);
1782 WARN_ON(rc);
fe75820b 1783 }
fdaa9aed
SH
1784
1785 if (LOOPBACK_INTERNAL(efx))
1786 link_changed = falcon_loopback_link_poll(efx);
1787 else
1788 link_changed = efx->phy_op->poll(efx);
1789
1790 if (link_changed) {
1791 falcon_stop_nic_stats(efx);
1792 falcon_deconfigure_mac_wrapper(efx);
1793
8fbca791 1794 falcon_reset_macs(efx);
710b208d 1795 rc = falcon_reconfigure_xmac(efx);
d3245b28 1796 BUG_ON(rc);
fdaa9aed
SH
1797
1798 falcon_start_nic_stats(efx);
1799
1800 efx_link_status_changed(efx);
1801 }
1802
8fbca791 1803 falcon_poll_xmac(efx);
fe75820b
BH
1804}
1805
8ceee660
BH
1806/* Zeroes out the SRAM contents. This routine must be called in
1807 * process context and is allowed to sleep.
1808 */
1809static int falcon_reset_sram(struct efx_nic *efx)
1810{
1811 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
1812 int count;
1813
1814 /* Set the SRAM wake/sleep GPIO appropriately. */
12d00cad 1815 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
3e6c4538
BH
1816 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
1817 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
12d00cad 1818 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
8ceee660
BH
1819
1820 /* Initiate SRAM reset */
1821 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
3e6c4538
BH
1822 FRF_AZ_SRM_INIT_EN, 1,
1823 FRF_AZ_SRM_NB_SZ, 0);
12d00cad 1824 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
8ceee660
BH
1825
1826 /* Wait for SRAM reset to complete */
1827 count = 0;
1828 do {
62776d03
BH
1829 netif_dbg(efx, hw, efx->net_dev,
1830 "waiting for SRAM reset (attempt %d)...\n", count);
8ceee660
BH
1831
1832 /* SRAM reset is slow; expect around 16ms */
1833 schedule_timeout_uninterruptible(HZ / 50);
1834
1835 /* Check for reset complete */
12d00cad 1836 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
3e6c4538 1837 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
62776d03
BH
1838 netif_dbg(efx, hw, efx->net_dev,
1839 "SRAM reset complete\n");
8ceee660
BH
1840
1841 return 0;
1842 }
25985edc 1843 } while (++count < 20); /* wait up to 0.4 sec */
8ceee660 1844
62776d03 1845 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
8ceee660
BH
1846 return -ETIMEDOUT;
1847}
1848
4de92180
BH
1849static void falcon_spi_device_init(struct efx_nic *efx,
1850 struct efx_spi_device *spi_device,
4a5b504d
BH
1851 unsigned int device_id, u32 device_type)
1852{
4a5b504d 1853 if (device_type != 0) {
4a5b504d
BH
1854 spi_device->device_id = device_id;
1855 spi_device->size =
1856 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1857 spi_device->addr_len =
1858 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1859 spi_device->munge_address = (spi_device->size == 1 << 9 &&
1860 spi_device->addr_len == 1);
f4150724
BH
1861 spi_device->erase_command =
1862 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1863 spi_device->erase_size =
1864 1 << SPI_DEV_TYPE_FIELD(device_type,
1865 SPI_DEV_TYPE_ERASE_SIZE);
4a5b504d
BH
1866 spi_device->block_size =
1867 1 << SPI_DEV_TYPE_FIELD(device_type,
1868 SPI_DEV_TYPE_BLOCK_SIZE);
4a5b504d 1869 } else {
4de92180 1870 spi_device->size = 0;
4a5b504d 1871 }
4a5b504d
BH
1872}
1873
8ceee660
BH
1874/* Extract non-volatile configuration */
1875static int falcon_probe_nvconfig(struct efx_nic *efx)
1876{
4de92180 1877 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 1878 struct falcon_nvconfig *nvconfig;
8ceee660
BH
1879 int rc;
1880
8ceee660 1881 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
4a5b504d
BH
1882 if (!nvconfig)
1883 return -ENOMEM;
8ceee660 1884
8c8661e4 1885 rc = falcon_read_nvram(efx, nvconfig);
6c88b0b6 1886 if (rc)
4de92180 1887 goto out;
6c88b0b6
BH
1888
1889 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1890 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1891
1892 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
4de92180
BH
1893 falcon_spi_device_init(
1894 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
6c88b0b6
BH
1895 le32_to_cpu(nvconfig->board_v3
1896 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
4de92180
BH
1897 falcon_spi_device_init(
1898 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
6c88b0b6
BH
1899 le32_to_cpu(nvconfig->board_v3
1900 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
8ceee660
BH
1901 }
1902
8c8661e4 1903 /* Read the MAC addresses */
7e300bc8 1904 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
8c8661e4 1905
62776d03
BH
1906 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1907 efx->phy_type, efx->mdio.prtad);
8ceee660 1908
6c88b0b6
BH
1909 rc = falcon_probe_board(efx,
1910 le16_to_cpu(nvconfig->board_v2.board_revision));
4de92180 1911out:
8ceee660
BH
1912 kfree(nvconfig);
1913 return rc;
1914}
1915
28e47c49
BH
1916static void falcon_dimension_resources(struct efx_nic *efx)
1917{
1918 efx->rx_dc_base = 0x20000;
1919 efx->tx_dc_base = 0x26000;
1920}
1921
4a5b504d
BH
1922/* Probe all SPI devices on the NIC */
1923static void falcon_probe_spi_devices(struct efx_nic *efx)
1924{
4de92180 1925 struct falcon_nic_data *nic_data = efx->nic_data;
4a5b504d 1926 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2f7f5730 1927 int boot_dev;
4a5b504d 1928
12d00cad
BH
1929 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
1930 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1931 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
4a5b504d 1932
3e6c4538
BH
1933 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
1934 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
1935 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
62776d03
BH
1936 netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
1937 boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
1938 "flash" : "EEPROM");
2f7f5730
BH
1939 } else {
1940 /* Disable VPD and set clock dividers to safe
1941 * values for initial programming. */
1942 boot_dev = -1;
62776d03
BH
1943 netif_dbg(efx, probe, efx->net_dev,
1944 "Booted from internal ASIC settings;"
1945 " setting SPI config\n");
3e6c4538 1946 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2f7f5730 1947 /* 125 MHz / 7 ~= 20 MHz */
3e6c4538 1948 FRF_AB_EE_SF_CLOCK_DIV, 7,
2f7f5730 1949 /* 125 MHz / 63 ~= 2 MHz */
3e6c4538 1950 FRF_AB_EE_EE_CLOCK_DIV, 63);
12d00cad 1951 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
4a5b504d
BH
1952 }
1953
4de92180
BH
1954 mutex_init(&nic_data->spi_lock);
1955
3e6c4538 1956 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
4de92180 1957 falcon_spi_device_init(efx, &nic_data->spi_flash,
3e6c4538 1958 FFE_AB_SPI_DEVICE_FLASH,
2f7f5730 1959 default_flash_type);
3e6c4538 1960 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
4de92180 1961 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
3e6c4538 1962 FFE_AB_SPI_DEVICE_EEPROM,
2f7f5730 1963 large_eeprom_type);
4a5b504d
BH
1964}
1965
/* A1 uses a fixed 0x20000-byte register mapping */
static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
{
	return 0x20000;
}
1971static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
1972{
1973 /* Map everything up to and including the RSS indirection table.
1974 * The PCI core takes care of mapping the MSI-X tables.
1975 */
1976 return FR_BZ_RX_INDIRECTION_TBL +
1977 FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
1978}
1979
ef2b90ee 1980static int falcon_probe_nic(struct efx_nic *efx)
8ceee660
BH
1981{
1982 struct falcon_nic_data *nic_data;
e775fb93 1983 struct falcon_board *board;
8ceee660
BH
1984 int rc;
1985
8ceee660
BH
1986 /* Allocate storage for hardware specific data */
1987 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
88c59425
BH
1988 if (!nic_data)
1989 return -ENOMEM;
5daab96d 1990 efx->nic_data = nic_data;
8ceee660 1991
57849460
BH
1992 rc = -ENODEV;
1993
86094f7f 1994 if (efx_farch_fpga_ver(efx) != 0) {
62776d03
BH
1995 netif_err(efx, probe, efx->net_dev,
1996 "Falcon FPGA not supported\n");
8ceee660 1997 goto fail1;
57849460
BH
1998 }
1999
2000 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2001 efx_oword_t nic_stat;
2002 struct pci_dev *dev;
2003 u8 pci_rev = efx->pci_dev->revision;
8ceee660 2004
57849460 2005 if ((pci_rev == 0xff) || (pci_rev == 0)) {
62776d03
BH
2006 netif_err(efx, probe, efx->net_dev,
2007 "Falcon rev A0 not supported\n");
57849460
BH
2008 goto fail1;
2009 }
2010 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2011 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
62776d03
BH
2012 netif_err(efx, probe, efx->net_dev,
2013 "Falcon rev A1 1G not supported\n");
57849460
BH
2014 goto fail1;
2015 }
2016 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
62776d03
BH
2017 netif_err(efx, probe, efx->net_dev,
2018 "Falcon rev A1 PCI-X not supported\n");
57849460
BH
2019 goto fail1;
2020 }
8ceee660 2021
57849460 2022 dev = pci_dev_get(efx->pci_dev);
937383a5
BH
2023 while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
2024 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
8ceee660
BH
2025 dev))) {
2026 if (dev->bus == efx->pci_dev->bus &&
2027 dev->devfn == efx->pci_dev->devfn + 1) {
2028 nic_data->pci_dev2 = dev;
2029 break;
2030 }
2031 }
2032 if (!nic_data->pci_dev2) {
62776d03
BH
2033 netif_err(efx, probe, efx->net_dev,
2034 "failed to find secondary function\n");
8ceee660
BH
2035 rc = -ENODEV;
2036 goto fail2;
2037 }
2038 }
2039
2040 /* Now we can reset the NIC */
4de92180 2041 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
8ceee660 2042 if (rc) {
62776d03 2043 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
8ceee660
BH
2044 goto fail3;
2045 }
2046
2047 /* Allocate memory for INT_KER */
0d19a540
BH
2048 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
2049 GFP_KERNEL);
8ceee660
BH
2050 if (rc)
2051 goto fail4;
2052 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2053
62776d03
BH
2054 netif_dbg(efx, probe, efx->net_dev,
2055 "INT_KER at %llx (virt %p phys %llx)\n",
2056 (u64)efx->irq_status.dma_addr,
2057 efx->irq_status.addr,
2058 (u64)virt_to_phys(efx->irq_status.addr));
8ceee660 2059
4a5b504d
BH
2060 falcon_probe_spi_devices(efx);
2061
8ceee660
BH
2062 /* Read in the non-volatile configuration */
2063 rc = falcon_probe_nvconfig(efx);
6c88b0b6
BH
2064 if (rc) {
2065 if (rc == -EINVAL)
2066 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
8ceee660 2067 goto fail5;
6c88b0b6 2068 }
8ceee660 2069
b105798f
BH
2070 efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
2071 EFX_MAX_CHANNELS);
cc180b69
BH
2072 efx->timer_quantum_ns = 4968; /* 621 cycles */
2073
37b5a603 2074 /* Initialise I2C adapter */
e775fb93
BH
2075 board = falcon_board(efx);
2076 board->i2c_adap.owner = THIS_MODULE;
2077 board->i2c_data = falcon_i2c_bit_operations;
2078 board->i2c_data.data = efx;
2079 board->i2c_adap.algo_data = &board->i2c_data;
2080 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2081 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2082 sizeof(board->i2c_adap.name));
2083 rc = i2c_bit_add_bus(&board->i2c_adap);
37b5a603
BH
2084 if (rc)
2085 goto fail5;
2086
44838a44 2087 rc = falcon_board(efx)->type->init(efx);
278c0621 2088 if (rc) {
62776d03
BH
2089 netif_err(efx, probe, efx->net_dev,
2090 "failed to initialise board\n");
278c0621
BH
2091 goto fail6;
2092 }
2093
55edc6e6
BH
2094 nic_data->stats_disable_count = 1;
2095 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2096 (unsigned long)efx);
2097
8ceee660
BH
2098 return 0;
2099
278c0621 2100 fail6:
bf51a8c5 2101 i2c_del_adapter(&board->i2c_adap);
e775fb93 2102 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
8ceee660 2103 fail5:
152b6a62 2104 efx_nic_free_buffer(efx, &efx->irq_status);
8ceee660 2105 fail4:
8ceee660
BH
2106 fail3:
2107 if (nic_data->pci_dev2) {
2108 pci_dev_put(nic_data->pci_dev2);
2109 nic_data->pci_dev2 = NULL;
2110 }
2111 fail2:
8ceee660
BH
2112 fail1:
2113 kfree(efx->nic_data);
2114 return rc;
2115}
2116
56241ceb
BH
2117static void falcon_init_rx_cfg(struct efx_nic *efx)
2118{
56241ceb
BH
2119 /* RX control FIFO thresholds (32 entries) */
2120 const unsigned ctrl_xon_thr = 20;
2121 const unsigned ctrl_xoff_thr = 25;
56241ceb
BH
2122 efx_oword_t reg;
2123
12d00cad 2124 efx_reado(efx, &reg, FR_AZ_RX_CFG);
daeda630 2125 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
85740cdf
BH
2126 /* Data FIFO size is 5.5K. The RX DMA engine only
2127 * supports scattering for user-mode queues, but will
2128 * split DMA writes at intervals of RX_USR_BUF_SIZE
2129 * (32-byte units) even for kernel-mode queues. We
2130 * set it to be so large that that never happens.
2131 */
3e6c4538
BH
2132 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2133 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
85740cdf 2134 (3 * 4096) >> 5);
5fb6b06d
BH
2135 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
2136 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
3e6c4538
BH
2137 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2138 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
56241ceb 2139 } else {
625b4514 2140 /* Data FIFO size is 80K; register fields moved */
3e6c4538
BH
2141 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2142 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
85740cdf 2143 EFX_RX_USR_BUF_SIZE >> 5);
5fb6b06d
BH
2144 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
2145 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
2146 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
3e6c4538
BH
2147 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2148 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2149 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
477e54eb
BH
2150
2151 /* Enable hash insertion. This is broken for the
2152 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
2153 * IPv4 hashes. */
2154 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
2155 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
2156 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
56241ceb 2157 }
4b0d29dc
BH
2158 /* Always enable XOFF signal from RX FIFO. We enable
2159 * or disable transmission of pause frames at the MAC. */
2160 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
12d00cad 2161 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
56241ceb
BH
2162}
2163
152b6a62
BH
2164/* This call performs hardware-specific global initialisation, such as
2165 * defining the descriptor cache sizes and number of RSS channels.
2166 * It does not set up any buffers, descriptor rings or event queues.
2167 */
2168static int falcon_init_nic(struct efx_nic *efx)
2169{
2170 efx_oword_t temp;
2171 int rc;
2172
2173 /* Use on-chip SRAM */
2174 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2175 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2176 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2177
152b6a62
BH
2178 rc = falcon_reset_sram(efx);
2179 if (rc)
2180 return rc;
2181
2182 /* Clear the parity enables on the TX data fifos as
2183 * they produce false parity errors because of timing issues
2184 */
2185 if (EFX_WORKAROUND_5129(efx)) {
2186 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2187 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2188 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2189 }
2190
8ceee660 2191 if (EFX_WORKAROUND_7244(efx)) {
12d00cad 2192 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
3e6c4538
BH
2193 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2194 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2195 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2196 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
12d00cad 2197 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
8ceee660 2198 }
8ceee660 2199
3e6c4538 2200 /* XXX This is documented only for Falcon A0/A1 */
8ceee660
BH
2201 /* Setup RX. Wait for descriptor is broken and must
2202 * be disabled. RXDP recovery shouldn't be needed, but is.
2203 */
12d00cad 2204 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3e6c4538
BH
2205 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
2206 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
8ceee660 2207 if (EFX_WORKAROUND_5583(efx))
3e6c4538 2208 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
12d00cad 2209 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
8ceee660 2210
8ceee660
BH
2211 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2212 * descriptors (which is bad).
2213 */
12d00cad 2214 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3e6c4538 2215 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
12d00cad 2216 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
8ceee660 2217
56241ceb 2218 falcon_init_rx_cfg(efx);
8ceee660 2219
daeda630 2220 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
477e54eb
BH
2221 /* Set hash key for IPv4 */
2222 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
2223 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
2224
2225 /* Set destination of both TX and RX Flush events */
3e6c4538 2226 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
12d00cad 2227 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
8ceee660
BH
2228 }
2229
86094f7f 2230 efx_farch_init_common(efx);
152b6a62 2231
8ceee660
BH
2232 return 0;
2233}
2234
ef2b90ee 2235static void falcon_remove_nic(struct efx_nic *efx)
8ceee660
BH
2236{
2237 struct falcon_nic_data *nic_data = efx->nic_data;
e775fb93 2238 struct falcon_board *board = falcon_board(efx);
37b5a603 2239
44838a44 2240 board->type->fini(efx);
278c0621 2241
8c870379 2242 /* Remove I2C adapter and clear it in preparation for a retry */
bf51a8c5 2243 i2c_del_adapter(&board->i2c_adap);
e775fb93 2244 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
8ceee660 2245
152b6a62 2246 efx_nic_free_buffer(efx, &efx->irq_status);
8ceee660 2247
4de92180 2248 __falcon_reset_hw(efx, RESET_TYPE_ALL);
8ceee660
BH
2249
2250 /* Release the second function after the reset */
2251 if (nic_data->pci_dev2) {
2252 pci_dev_put(nic_data->pci_dev2);
2253 nic_data->pci_dev2 = NULL;
2254 }
2255
2256 /* Tear down the private nic state */
2257 kfree(efx->nic_data);
2258 efx->nic_data = NULL;
2259}
2260
ef2b90ee 2261static void falcon_update_nic_stats(struct efx_nic *efx)
8ceee660 2262{
55edc6e6 2263 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660
BH
2264 efx_oword_t cnt;
2265
55edc6e6
BH
2266 if (nic_data->stats_disable_count)
2267 return;
2268
12d00cad 2269 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3e6c4538
BH
2270 efx->n_rx_nodesc_drop_cnt +=
2271 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
55edc6e6
BH
2272
2273 if (nic_data->stats_pending &&
2274 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
2275 nic_data->stats_pending = false;
2276 rmb(); /* read the done flag before the stats */
710b208d 2277 falcon_update_stats_xmac(efx);
55edc6e6
BH
2278 }
2279}
2280
2281void falcon_start_nic_stats(struct efx_nic *efx)
2282{
2283 struct falcon_nic_data *nic_data = efx->nic_data;
2284
2285 spin_lock_bh(&efx->stats_lock);
2286 if (--nic_data->stats_disable_count == 0)
2287 falcon_stats_request(efx);
2288 spin_unlock_bh(&efx->stats_lock);
2289}
2290
2291void falcon_stop_nic_stats(struct efx_nic *efx)
2292{
2293 struct falcon_nic_data *nic_data = efx->nic_data;
2294 int i;
2295
2296 might_sleep();
2297
2298 spin_lock_bh(&efx->stats_lock);
2299 ++nic_data->stats_disable_count;
2300 spin_unlock_bh(&efx->stats_lock);
2301
2302 del_timer_sync(&nic_data->stats_timer);
2303
2304 /* Wait enough time for the most recent transfer to
2305 * complete. */
2306 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
2307 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
2308 break;
2309 msleep(1);
2310 }
2311
2312 spin_lock_bh(&efx->stats_lock);
2313 falcon_stats_complete(efx);
2314 spin_unlock_bh(&efx->stats_lock);
8ceee660
BH
2315}
2316
06629f07
BH
2317static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
2318{
2319 falcon_board(efx)->type->set_id_led(efx, mode);
2320}
2321
89c758fa
BH
2322/**************************************************************************
2323 *
2324 * Wake on LAN
2325 *
2326 **************************************************************************
2327 */
2328
2329static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2330{
2331 wol->supported = 0;
2332 wol->wolopts = 0;
2333 memset(&wol->sopass, 0, sizeof(wol->sopass));
2334}
2335
2336static int falcon_set_wol(struct efx_nic *efx, u32 type)
2337{
2338 if (type != 0)
2339 return -EINVAL;
2340 return 0;
2341}
2342
8ceee660
BH
2343/**************************************************************************
2344 *
754c653a 2345 * Revision-dependent attributes used by efx.c and nic.c
8ceee660
BH
2346 *
2347 **************************************************************************
2348 */
2349
6c8c2513 2350const struct efx_nic_type falcon_a1_nic_type = {
b105798f 2351 .mem_map_size = falcon_a1_mem_map_size,
ef2b90ee
BH
2352 .probe = falcon_probe_nic,
2353 .remove = falcon_remove_nic,
2354 .init = falcon_init_nic,
28e47c49 2355 .dimension_resources = falcon_dimension_resources,
1840667a 2356 .fini = falcon_irq_ack_a1,
ef2b90ee 2357 .monitor = falcon_monitor,
0e2a9c7c
BH
2358 .map_reset_reason = falcon_map_reset_reason,
2359 .map_reset_flags = falcon_map_reset_flags,
ef2b90ee
BH
2360 .reset = falcon_reset_hw,
2361 .probe_port = falcon_probe_port,
2362 .remove_port = falcon_remove_port,
40641ed9 2363 .handle_global_event = falcon_handle_global_event,
e42c3d85 2364 .fini_dmaq = efx_farch_fini_dmaq,
ef2b90ee 2365 .prepare_flush = falcon_prepare_flush,
d5e8cc6c 2366 .finish_flush = efx_port_dummy_op_void,
ef2b90ee
BH
2367 .update_stats = falcon_update_nic_stats,
2368 .start_stats = falcon_start_nic_stats,
2369 .stop_stats = falcon_stop_nic_stats,
06629f07 2370 .set_id_led = falcon_set_id_led,
ef2b90ee 2371 .push_irq_moderation = falcon_push_irq_moderation,
d3245b28 2372 .reconfigure_port = falcon_reconfigure_port,
9dd3a13b 2373 .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
710b208d
BH
2374 .reconfigure_mac = falcon_reconfigure_xmac,
2375 .check_mac_fault = falcon_xmac_check_fault,
89c758fa
BH
2376 .get_wol = falcon_get_wol,
2377 .set_wol = falcon_set_wol,
2378 .resume_wol = efx_port_dummy_op_void,
0aa3fbaa 2379 .test_nvram = falcon_test_nvram,
86094f7f
BH
2380 .irq_enable_master = efx_farch_irq_enable_master,
2381 .irq_test_generate = efx_farch_irq_test_generate,
2382 .irq_disable_non_ev = efx_farch_irq_disable_master,
2383 .irq_handle_msi = efx_farch_msi_interrupt,
2384 .irq_handle_legacy = falcon_legacy_interrupt_a1,
2385 .tx_probe = efx_farch_tx_probe,
2386 .tx_init = efx_farch_tx_init,
2387 .tx_remove = efx_farch_tx_remove,
2388 .tx_write = efx_farch_tx_write,
2389 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2390 .rx_probe = efx_farch_rx_probe,
2391 .rx_init = efx_farch_rx_init,
2392 .rx_remove = efx_farch_rx_remove,
2393 .rx_write = efx_farch_rx_write,
2394 .rx_defer_refill = efx_farch_rx_defer_refill,
2395 .ev_probe = efx_farch_ev_probe,
2396 .ev_init = efx_farch_ev_init,
2397 .ev_fini = efx_farch_ev_fini,
2398 .ev_remove = efx_farch_ev_remove,
2399 .ev_process = efx_farch_ev_process,
2400 .ev_read_ack = efx_farch_ev_read_ack,
2401 .ev_test_generate = efx_farch_ev_test_generate,
b895d73e 2402
add72477
BH
2403 /* We don't expose the filter table on Falcon A1 as it is not
2404 * mapped into function 0, but these implementations still
2405 * work with a degenerate case of all tables set to size 0.
2406 */
2407 .filter_table_probe = efx_farch_filter_table_probe,
2408 .filter_table_restore = efx_farch_filter_table_restore,
2409 .filter_table_remove = efx_farch_filter_table_remove,
2410 .filter_insert = efx_farch_filter_insert,
2411 .filter_remove_safe = efx_farch_filter_remove_safe,
2412 .filter_get_safe = efx_farch_filter_get_safe,
2413 .filter_clear_rx = efx_farch_filter_clear_rx,
2414 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2415 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2416 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2417
daeda630 2418 .revision = EFX_REV_FALCON_A1,
3e6c4538
BH
2419 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
2420 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
2421 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
2422 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
2423 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
6d51d307 2424 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
8ceee660 2425 .rx_buffer_padding = 0x24,
85740cdf 2426 .can_rx_scatter = false,
8ceee660 2427 .max_interrupt_mode = EFX_INT_MODE_MSI,
cc180b69 2428 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
c383b537 2429 .offload_features = NETIF_F_IP_CSUM,
df2cd8af 2430 .mcdi_max_ver = -1,
8ceee660
BH
2431};
2432
6c8c2513 2433const struct efx_nic_type falcon_b0_nic_type = {
b105798f 2434 .mem_map_size = falcon_b0_mem_map_size,
ef2b90ee
BH
2435 .probe = falcon_probe_nic,
2436 .remove = falcon_remove_nic,
2437 .init = falcon_init_nic,
28e47c49 2438 .dimension_resources = falcon_dimension_resources,
ef2b90ee
BH
2439 .fini = efx_port_dummy_op_void,
2440 .monitor = falcon_monitor,
0e2a9c7c
BH
2441 .map_reset_reason = falcon_map_reset_reason,
2442 .map_reset_flags = falcon_map_reset_flags,
ef2b90ee
BH
2443 .reset = falcon_reset_hw,
2444 .probe_port = falcon_probe_port,
2445 .remove_port = falcon_remove_port,
40641ed9 2446 .handle_global_event = falcon_handle_global_event,
e42c3d85 2447 .fini_dmaq = efx_farch_fini_dmaq,
ef2b90ee 2448 .prepare_flush = falcon_prepare_flush,
d5e8cc6c 2449 .finish_flush = efx_port_dummy_op_void,
ef2b90ee
BH
2450 .update_stats = falcon_update_nic_stats,
2451 .start_stats = falcon_start_nic_stats,
2452 .stop_stats = falcon_stop_nic_stats,
06629f07 2453 .set_id_led = falcon_set_id_led,
ef2b90ee 2454 .push_irq_moderation = falcon_push_irq_moderation,
d3245b28 2455 .reconfigure_port = falcon_reconfigure_port,
9dd3a13b 2456 .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
710b208d
BH
2457 .reconfigure_mac = falcon_reconfigure_xmac,
2458 .check_mac_fault = falcon_xmac_check_fault,
89c758fa
BH
2459 .get_wol = falcon_get_wol,
2460 .set_wol = falcon_set_wol,
2461 .resume_wol = efx_port_dummy_op_void,
d4f2cecc 2462 .test_chip = falcon_b0_test_chip,
0aa3fbaa 2463 .test_nvram = falcon_test_nvram,
86094f7f
BH
2464 .irq_enable_master = efx_farch_irq_enable_master,
2465 .irq_test_generate = efx_farch_irq_test_generate,
2466 .irq_disable_non_ev = efx_farch_irq_disable_master,
2467 .irq_handle_msi = efx_farch_msi_interrupt,
2468 .irq_handle_legacy = efx_farch_legacy_interrupt,
2469 .tx_probe = efx_farch_tx_probe,
2470 .tx_init = efx_farch_tx_init,
2471 .tx_remove = efx_farch_tx_remove,
2472 .tx_write = efx_farch_tx_write,
2473 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2474 .rx_probe = efx_farch_rx_probe,
2475 .rx_init = efx_farch_rx_init,
2476 .rx_remove = efx_farch_rx_remove,
2477 .rx_write = efx_farch_rx_write,
2478 .rx_defer_refill = efx_farch_rx_defer_refill,
2479 .ev_probe = efx_farch_ev_probe,
2480 .ev_init = efx_farch_ev_init,
2481 .ev_fini = efx_farch_ev_fini,
2482 .ev_remove = efx_farch_ev_remove,
2483 .ev_process = efx_farch_ev_process,
2484 .ev_read_ack = efx_farch_ev_read_ack,
2485 .ev_test_generate = efx_farch_ev_test_generate,
add72477
BH
2486 .filter_table_probe = efx_farch_filter_table_probe,
2487 .filter_table_restore = efx_farch_filter_table_restore,
2488 .filter_table_remove = efx_farch_filter_table_remove,
2489 .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
2490 .filter_insert = efx_farch_filter_insert,
2491 .filter_remove_safe = efx_farch_filter_remove_safe,
2492 .filter_get_safe = efx_farch_filter_get_safe,
2493 .filter_clear_rx = efx_farch_filter_clear_rx,
2494 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2495 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2496 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2497#ifdef CONFIG_RFS_ACCEL
2498 .filter_rfs_insert = efx_farch_filter_rfs_insert,
2499 .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
2500#endif
b895d73e 2501
daeda630 2502 .revision = EFX_REV_FALCON_B0,
3e6c4538
BH
2503 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
2504 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
2505 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
2506 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
2507 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
6d51d307 2508 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
39c9cf07 2509 .rx_buffer_hash_size = 0x10,
8ceee660 2510 .rx_buffer_padding = 0,
85740cdf 2511 .can_rx_scatter = true,
8ceee660 2512 .max_interrupt_mode = EFX_INT_MODE_MSIX,
cc180b69 2513 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
b4187e42 2514 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
df2cd8af 2515 .mcdi_max_ver = -1,
add72477 2516 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
8ceee660
BH
2517};
2518