/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/mii.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
#include "mdio_10g.h"

/* Hardware control for SFC4000 (aka Falcon). */

/**************************************************************************
 *
 * MAC stats DMA format
 *
 **************************************************************************
 */

#define FALCON_MAC_STATS_SIZE 0x100

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgRxOctetsOK_offset 0x8
#define XgRxOctetsOK_WIDTH 48
#define XgRxPkts_offset 0x10
#define XgRxPkts_WIDTH 32
#define XgRxPktsOK_offset 0x14
#define XgRxPktsOK_WIDTH 32
#define XgRxBroadcastPkts_offset 0x18
#define XgRxBroadcastPkts_WIDTH 32
#define XgRxMulticastPkts_offset 0x1C
#define XgRxMulticastPkts_WIDTH 32
#define XgRxUnicastPkts_offset 0x20
#define XgRxUnicastPkts_WIDTH 32
#define XgRxUndersizePkts_offset 0x24
#define XgRxUndersizePkts_WIDTH 32
#define XgRxOversizePkts_offset 0x28
#define XgRxOversizePkts_WIDTH 32
#define XgRxJabberPkts_offset 0x2C
#define XgRxJabberPkts_WIDTH 32
#define XgRxUndersizeFCSerrorPkts_offset 0x30
#define XgRxUndersizeFCSerrorPkts_WIDTH 32
#define XgRxDropEvents_offset 0x34
#define XgRxDropEvents_WIDTH 32
#define XgRxFCSerrorPkts_offset 0x38
#define XgRxFCSerrorPkts_WIDTH 32
#define XgRxAlignError_offset 0x3C
#define XgRxAlignError_WIDTH 32
#define XgRxSymbolError_offset 0x40
#define XgRxSymbolError_WIDTH 32
#define XgRxInternalMACError_offset 0x44
#define XgRxInternalMACError_WIDTH 32
#define XgRxControlPkts_offset 0x48
#define XgRxControlPkts_WIDTH 32
#define XgRxPausePkts_offset 0x4C
#define XgRxPausePkts_WIDTH 32
#define XgRxPkts64Octets_offset 0x50
#define XgRxPkts64Octets_WIDTH 32
#define XgRxPkts65to127Octets_offset 0x54
#define XgRxPkts65to127Octets_WIDTH 32
#define XgRxPkts128to255Octets_offset 0x58
#define XgRxPkts128to255Octets_WIDTH 32
#define XgRxPkts256to511Octets_offset 0x5C
#define XgRxPkts256to511Octets_WIDTH 32
#define XgRxPkts512to1023Octets_offset 0x60
#define XgRxPkts512to1023Octets_WIDTH 32
#define XgRxPkts1024to15xxOctets_offset 0x64
#define XgRxPkts1024to15xxOctets_WIDTH 32
#define XgRxPkts15xxtoMaxOctets_offset 0x68
#define XgRxPkts15xxtoMaxOctets_WIDTH 32
#define XgRxLengthError_offset 0x6C
#define XgRxLengthError_WIDTH 32
#define XgTxPkts_offset 0x80
#define XgTxPkts_WIDTH 32
#define XgTxOctets_offset 0x88
#define XgTxOctets_WIDTH 48
#define XgTxMulticastPkts_offset 0x90
#define XgTxMulticastPkts_WIDTH 32
#define XgTxBroadcastPkts_offset 0x94
#define XgTxBroadcastPkts_WIDTH 32
#define XgTxUnicastPkts_offset 0x98
#define XgTxUnicastPkts_WIDTH 32
#define XgTxControlPkts_offset 0x9C
#define XgTxControlPkts_WIDTH 32
#define XgTxPausePkts_offset 0xA0
#define XgTxPausePkts_WIDTH 32
#define XgTxPkts64Octets_offset 0xA4
#define XgTxPkts64Octets_WIDTH 32
#define XgTxPkts65to127Octets_offset 0xA8
#define XgTxPkts65to127Octets_WIDTH 32
#define XgTxPkts128to255Octets_offset 0xAC
#define XgTxPkts128to255Octets_WIDTH 32
#define XgTxPkts256to511Octets_offset 0xB0
#define XgTxPkts256to511Octets_WIDTH 32
#define XgTxPkts512to1023Octets_offset 0xB4
#define XgTxPkts512to1023Octets_WIDTH 32
#define XgTxPkts1024to15xxOctets_offset 0xB8
#define XgTxPkts1024to15xxOctets_WIDTH 32
#define XgTxPkts1519toMaxOctets_offset 0xBC
#define XgTxPkts1519toMaxOctets_WIDTH 32
#define XgTxUndersizePkts_offset 0xC0
#define XgTxUndersizePkts_WIDTH 32
#define XgTxOversizePkts_offset 0xC4
#define XgTxOversizePkts_WIDTH 32
#define XgTxNonTcpUdpPkt_offset 0xC8
#define XgTxNonTcpUdpPkt_WIDTH 16
#define XgTxMacSrcErrPkt_offset 0xCC
#define XgTxMacSrcErrPkt_WIDTH 16
#define XgTxIpSrcErrPkt_offset 0xD0
#define XgTxIpSrcErrPkt_WIDTH 16
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32

#define FALCON_STATS_NOT_DONE 0x00000000
#define FALCON_STATS_DONE 0xffffffff

#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)

/* Retrieve statistic from statistics block */
#define FALCON_STAT(efx, falcon_stat, efx_stat) do {		\
	if (FALCON_STAT_WIDTH(falcon_stat) == 16)		\
		(efx)->mac_stats.efx_stat += le16_to_cpu(	\
			*((__force __le16 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat)))); \
	else if (FALCON_STAT_WIDTH(falcon_stat) == 32)		\
		(efx)->mac_stats.efx_stat += le32_to_cpu(	\
			*((__force __le32 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat)))); \
	else							\
		(efx)->mac_stats.efx_stat += le64_to_cpu(	\
			*((__force __le64 *)			\
			  (efx->stats_buffer.addr +		\
			   FALCON_STAT_OFFSET(falcon_stat)))); \
	} while (0)
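
/* Illustrative example (not additional driver code): XgRxOctets is 48 bits
 * wide, so FALCON_STAT() falls through to the 64-bit case and
 *
 *	FALCON_STAT(efx, XgRxOctets, rx_bytes);
 *
 * accumulates the little-endian quadword at
 * efx->stats_buffer.addr + XgRxOctets_offset into efx->mac_stats.rx_bytes,
 * exactly as done in falcon_update_stats_xmac() below.
 */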

/**************************************************************************
 *
 * Basic SPI command set and bit definitions
 *
 *************************************************************************/

#define SPI_WRSR 0x01		/* Write status register */
#define SPI_WRITE 0x02		/* Write data to memory array */
#define SPI_READ 0x03		/* Read data from memory array */
#define SPI_WRDI 0x04		/* Reset write enable latch */
#define SPI_RDSR 0x05		/* Read status register */
#define SPI_WREN 0x06		/* Set write enable latch */
#define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */

#define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
#define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
#define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
#define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
#define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
#define SPI_STATUS_NRDY 0x01	/* Device busy flag */

/**************************************************************************
 *
 * Non-volatile memory layout
 *
 **************************************************************************
 */

/* SFC4000 flash is partitioned into:
 *     0-0x400       chip and board config (see struct falcon_nvconfig)
 *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
 *     0x8000-end    boot code (mapped to PCI expansion ROM)
 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
 *     0-0x400       chip and board config
 *     configurable  VPD
 *     0x800-0x1800  boot config
 * Aside from the chip and board config, all of these are optional and may
 * be absent or truncated depending on the devices used.
 */
#define FALCON_NVCONFIG_END 0x400U
#define FALCON_FLASH_BOOTCODE_START 0x8000U
#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U

/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
struct falcon_nvconfig_board_v2 {
	__le16 nports;
	u8 port0_phy_addr;
	u8 port0_phy_type;
	u8 port1_phy_addr;
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;

/* Board configuration v3 extra information */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;

/* Bit numbers for spi_device_type */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
#define SPI_DEV_TYPE_FIELD(type, field)					\
	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))

#define FALCON_NVCONFIG_OFFSET 0x300

#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];				/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;

/*************************************************************************/

static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);

static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));

/**************************************************************************
 *
 * I2C bus - this is a bit-bashing interface using GPIO pins
 * Note that it uses the output enables to tristate the outputs
 * SDA is the data pin and SCL is the clock
 *
 **************************************************************************
 */
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
}

static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
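
/* Illustrative sketch (not part of this section): the bit-bang adapter
 * built on these operations is registered elsewhere in the driver,
 * roughly along the lines of
 *
 *	adapter->algo_data = &falcon_i2c_bit_operations;
 *	adapter->dev.parent = &efx->pci_dev->dev;
 *	rc = i2c_bit_add_bus(adapter);
 *
 * i2c_bit_add_bus() is the standard i2c-algo-bit entry point; the exact
 * adapter structure and call site are assumptions here.
 */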

static void falcon_push_irq_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_INT_HLDOFF,
				     FRF_AB_TC_TIMER_VAL,
				     channel->irq_moderation - 1);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd,
				     FRF_AB_TC_TIMER_MODE,
				     FFE_BB_TIMER_MODE_DIS,
				     FRF_AB_TC_TIMER_VAL, 0);
	}
	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
	efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
			       channel->channel);
}
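
/* Worked example (illustrative, not additional driver code): with
 * channel->irq_moderation == 20, hold-off mode is selected and
 * FRF_AB_TC_TIMER_VAL is written as 19, i.e. the register takes the
 * moderation count minus one; irq_moderation == 0 instead selects
 * FFE_BB_TIMER_MODE_DIS and disables moderation entirely.
 */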

static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);

static void falcon_prepare_flush(struct efx_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Wait for the tx and rx fifo's to get to the next packet boundary
	 * (~1ms without back-pressure), then to drain the remainder of the
	 * fifo's at data path speeds (negligible), with a healthy margin. */
	msleep(10);
}

/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
	efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}


static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	if (queues & 1)
		efx_schedule_channel_irq(efx_get_channel(efx, 0));
	if (queues & 2)
		efx_schedule_channel_irq(efx_get_channel(efx, 1));
	return IRQ_HANDLED;
}
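
/* Illustrative sketch (not part of this file): on A1 silicon this handler
 * is installed elsewhere in the driver against the shared legacy IRQ,
 * roughly as
 *
 *	rc = request_irq(efx->pci_dev->irq, falcon_legacy_interrupt_a1,
 *			 IRQF_SHARED, efx->name, efx);
 *
 * so that dev_id is the struct efx_nic pointer recovered at the top of
 * the handler.  The exact call site and flags are assumptions here.
 */
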
/**************************************************************************
 *
 * EEPROM/flash
 *
 **************************************************************************
 */

#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)

static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;
	efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
	return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}

/* Wait for SPI command completion */
static int falcon_spi_wait(struct efx_nic *efx)
{
	/* Most commands will finish quickly, so we start polling at
	 * very short intervals.  Sometimes the command may have to
	 * wait for VPD or expansion ROM access outside of our
	 * control, so we allow up to 100 ms. */
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int i;

	for (i = 0; i < 10; i++) {
		if (!falcon_spi_poll(efx))
			return 0;
		udelay(10);
	}

	for (;;) {
		if (!falcon_spi_poll(efx))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
	       unsigned int command, int address,
	       const void *in, void *out, size_t len)
{
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
			     FRF_AB_EE_SPI_HCMD_READ, reading,
			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
			     FRF_AB_EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     FRF_AB_EE_SPI_HCMD_ENC, command);
	efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
		memcpy(out, &reg, len);
	}

	return 0;
}
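
/* Illustrative usage (not additional driver code): a status-register read
 * issues the RDSR opcode with no address phase and a one-byte data phase,
 * exactly as the callers below do:
 *
 *	u8 status;
 *	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
 *			    &status, sizeof(status));
 *
 * Passing address == -1 skips programming FR_AB_EE_SPI_HADR and sets the
 * address-byte count (ADBCNT) to zero.
 */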

static inline u8
falcon_spi_munge_command(const struct falcon_spi_device *spi,
			 const u8 command, const unsigned int address)
{
	return command | (((address >> 8) & spi->munge_address) << 3);
}
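
/* Worked example (illustrative): a small EEPROM whose ninth address bit
 * cannot fit in its single address byte sets spi->munge_address to 1.
 * For address 0x1A5, bit 8 of the address is then folded into bit 3 of
 * the opcode, so SPI_READ (0x03) becomes 0x0B.  Devices with enough
 * address bytes use munge_address == 0 and the command is unchanged.
 */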

static int
falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
		loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		block_len = min(len - pos, FALCON_SPI_MAX_LEN);

		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
				    buffer + pos, block_len);
		if (rc)
			break;
		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

#ifdef CONFIG_SFC_MTD

struct falcon_mtd_partition {
	struct efx_mtd_partition common;
	const struct falcon_spi_device *spi;
	size_t offset;
};

#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)

static size_t
falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
{
	return min(FALCON_SPI_MAX_LEN,
		   (spi->block_size - (start & (spi->block_size - 1))));
}
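
/* Worked example (illustrative): with a 256-byte write block and
 * start == 0x1F8, only 0x200 - 0x1F8 = 8 bytes remain before the block
 * boundary, so the limit is 8 rather than the usual FALCON_SPI_MAX_LEN
 * of 16 bytes.  falcon_spi_write() below chunks its writes accordingly.
 */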

/* Wait up to 10 ms for buffered write completion */
static int
falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			netif_err(efx, hw, efx->net_dev,
				  "SPI write timeout on device %d"
				  " last status=0x%02x\n",
				  spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}

static int
falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(efx, spi);
		if (rc)
			break;

		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(efx, spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}

static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct efx_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}

static int
falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}

#define FALCON_SPI_VERIFY_BUF_LEN 16

static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->spi;
	struct efx_nic *efx = part->common.mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

static void falcon_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s",
		 efx->name, part->type_name);
}

static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, part->spi, part->offset + start,
			     len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_erase(part, part->offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, part->spi, part->offset + start,
			      len, retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	n_parts = 0;

	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}

#endif /* CONFIG_SFC_MTD */

/**************************************************************************
 *
 * XMAC operations
 *
 **************************************************************************
 */

/* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct efx_nic *efx)
{
	efx_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	EFX_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}

int falcon_reset_xaui(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
		if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}

static void falcon_ack_status_intr(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}

static bool falcon_xgxs_link_ok(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}

static bool falcon_xmac_link_ok(struct efx_nic *efx)
{
	/*
	 * Check MAC's XGXS link status except when using XGMII loopback
	 * which bypasses the XGXS block.
	 * If possible, check PHY's XGXS link status except when using
	 * MAC loopback.
	 */
	return (efx->loopback_mode == LOOPBACK_XGMII ||
		falcon_xgxs_link_ok(efx)) &&
		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
		 LOOPBACK_INTERNAL(efx) ||
		 efx_mdio_phyxgxs_lane_sync(efx));
}

static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
{
	unsigned int max_frame_len;
	efx_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);

	/* Configure MAC - cut-thru mode is hard wired on */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EFX_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
	EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	efx_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}

static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
{
	efx_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into or out of XGMII, XGXS or XAUI loopbacks. */
	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}


/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
{
	bool mac_up = falcon_xmac_link_ok(efx);

	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
	    efx_phy_mode_disabled(efx->phy_mode))
		/* XAUI link is expected to be down */
		return mac_up;

	falcon_stop_nic_stats(efx);

	while (!mac_up && tries) {
		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
		falcon_reset_xaui(efx);
		udelay(200);

		mac_up = falcon_xmac_link_ok(efx);
		--tries;
	}

	falcon_start_nic_stats(efx);

	return mac_up;
}

static bool falcon_xmac_check_fault(struct efx_nic *efx)
{
	return !falcon_xmac_link_ok_retry(efx, 5);
}

static int falcon_reconfigure_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	efx_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}

static void falcon_update_stats_xmac(struct efx_nic *efx)
{
	struct efx_mac_stats *mac_stats = &efx->mac_stats;

	/* Update MAC stats from DMAed values */
	FALCON_STAT(efx, XgRxOctets, rx_bytes);
	FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
	FALCON_STAT(efx, XgRxPkts, rx_packets);
	FALCON_STAT(efx, XgRxPktsOK, rx_good);
	FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
	FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
	FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
	FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
	FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
	FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
	FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
	FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
	FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
	FALCON_STAT(efx, XgRxAlignError, rx_align_error);
	FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
	FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
	FALCON_STAT(efx, XgRxControlPkts, rx_control);
	FALCON_STAT(efx, XgRxPausePkts, rx_pause);
	FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
	FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
	FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
	FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
	FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
	FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
	FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
	FALCON_STAT(efx, XgRxLengthError, rx_length_error);
	FALCON_STAT(efx, XgTxPkts, tx_packets);
	FALCON_STAT(efx, XgTxOctets, tx_bytes);
	FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
	FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
	FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
	FALCON_STAT(efx, XgTxControlPkts, tx_control);
	FALCON_STAT(efx, XgTxPausePkts, tx_pause);
	FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
	FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
	FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
	FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
	FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
	FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
	FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
	FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
	FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
	FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
	FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);

	/* Update derived statistics */
	efx_update_diff_stat(&mac_stats->tx_good_bytes,
			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
			     mac_stats->tx_control * 64);
	efx_update_diff_stat(&mac_stats->rx_bad_bytes,
			     mac_stats->rx_bytes - mac_stats->rx_good_bytes -
			     mac_stats->rx_control * 64);
}

static void falcon_poll_xmac(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}

/**************************************************************************
 *
 * MAC wrapper
 *
 **************************************************************************
 */

static void falcon_push_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}

static void falcon_reset_macs(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg, mac_ctrl;
	int count;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		for (count = 0; count < 10000; count++) {
			efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
	}

	/* Mac stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	efx_reado(efx, &reg, FR_AB_GLB_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	efx_writeo(efx, &reg, FR_AB_GLB_CTL);

	count = 0;
	while (1) {
		efx_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}

static void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	efx_reado(efx, &reg, FR_AB_MAC_CTRL);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
		return;

	falcon_reset_macs(efx);
}

static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}

static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	efx_oword_t reg;
	int link_speed, isolate;

	isolate = !!ACCESS_ONCE(efx->reset_pending);

	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}
	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	efx_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	efx_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	efx_writeo(efx, &reg, FR_AZ_RX_CFG);
}

static void falcon_stats_request(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	if (nic_data->stats_dma_done == NULL)
		return;	/* no mac selected */

	*nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}

static void falcon_stats_complete(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
		rmb(); /* read the done flag before the stats */
		falcon_update_stats_xmac(efx);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}

static void falcon_stats_timer_func(unsigned long context)
{
	struct efx_nic *efx = (struct efx_nic *)context;
	struct falcon_nic_data *nic_data = efx->nic_data;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}

static bool falcon_loopback_link_poll(struct efx_nic *efx)
{
	struct efx_link_state old_state = efx->link_state;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	efx->link_state.fd = true;
	efx->link_state.fc = efx->wanted_fc;
	efx->link_state.up = true;
	efx->link_state.speed = 10000;

	return !efx_link_state_equal(&efx->link_state, &old_state);
}

static int falcon_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc);

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	efx_link_status_changed(efx);

	return 0;
}

/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames. There
 * isn't any indication that this has happened, so the best we do is
 * leave it up to the user to spot this and fix it by cycling transmit
 * flow control on this end.
 */

static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Schedule a reset to recover */
	efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}

static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}

/**************************************************************************
 *
 * PHY access via GMII
 *
 **************************************************************************
 */

/* Wait for GMII access to complete */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		efx_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EFX_OWORD_FMT"\n",
					  EFX_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}

/* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		   prtad, devad, addr, value);

	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	efx_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	efx_writeo(efx, &reg, FR_AB_MD_TXD);

	EFX_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	efx_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		efx_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
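
/* Illustrative call (not additional driver code): these hooks are exposed
 * through efx->mdio (see falcon_probe_port() below), so a clause-45 write
 * to PMA/PMD register 0 of the PHY at port address 1 would reach this
 * function roughly as
 *
 *	falcon_mdio_write(efx->net_dev, 1, MDIO_MMD_PMAPMD, MDIO_CTRL1, 0);
 *
 * The prtad/devad/addr values are made-up examples; real callers go
 * through the mdio45 helpers rather than calling this directly.
 */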
1603
68e7f45e
BH
1604/* Read an MDIO register of a PHY connected to Falcon. */
1605static int falcon_mdio_read(struct net_device *net_dev,
1606 int prtad, int devad, u16 addr)
8ceee660 1607{
767e468c 1608 struct efx_nic *efx = netdev_priv(net_dev);
4833f02a 1609 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 1610 efx_oword_t reg;
68e7f45e 1611 int rc;
8ceee660 1612
4833f02a 1613 mutex_lock(&nic_data->mdio_lock);
8ceee660 1614
68e7f45e
BH
1615 /* Check MDIO not currently being accessed */
1616 rc = falcon_gmii_wait(efx);
1617 if (rc)
8ceee660
BH
1618 goto out;
1619
3e6c4538 1620 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
12d00cad 1621 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
8ceee660 1622
3e6c4538
BH
1623 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
1624 FRF_AB_MD_DEV_ADR, devad);
12d00cad 1625 efx_writeo(efx, &reg, FR_AB_MD_ID);
8ceee660
BH
1626
1627 /* Request data to be read */
3e6c4538 1628 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
12d00cad 1629 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660
BH
1630
1631 /* Wait for data to become available */
68e7f45e
BH
1632 rc = falcon_gmii_wait(efx);
1633 if (rc == 0) {
12d00cad 1634 efx_reado(efx, &reg, FR_AB_MD_RXD);
3e6c4538 1635 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
1636 netif_vdbg(efx, hw, efx->net_dev,
1637 "read from MDIO %d register %d.%d, got %04x\n",
1638 prtad, devad, addr, rc);
1639 } else {
1640 /* Abort the read operation */
1641 EFX_POPULATE_OWORD_2(reg,
1642 FRF_AB_MD_RIC, 0,
1643 FRF_AB_MD_GC, 1);
12d00cad 1644 efx_writeo(efx, &reg, FR_AB_MD_CS);
8ceee660 1645
1646 netif_dbg(efx, hw, efx->net_dev,
1647 "read from MDIO %d register %d.%d, got error %d\n",
1648 prtad, devad, addr, rc);
1649 }
1650
ab867461 1651out:
4833f02a 1652 mutex_unlock(&nic_data->mdio_lock);
68e7f45e 1653 return rc;
1654}
1655
8ceee660 1656/* This call is responsible for hooking in the MAC and PHY operations */
ef2b90ee 1657static int falcon_probe_port(struct efx_nic *efx)
8ceee660 1658{
8fbca791 1659 struct falcon_nic_data *nic_data = efx->nic_data;
1660 int rc;
1661
1662 switch (efx->phy_type) {
1663 case PHY_TYPE_SFX7101:
1664 efx->phy_op = &falcon_sfx7101_phy_ops;
1665 break;
1666 case PHY_TYPE_QT2022C2:
1667 case PHY_TYPE_QT2025C:
b37b62fe 1668 efx->phy_op = &falcon_qt202x_phy_ops;
96c45726 1669 break;
1670 case PHY_TYPE_TXC43128:
1671 efx->phy_op = &falcon_txc_phy_ops;
1672 break;
96c45726 1673 default:
1674 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
1675 efx->phy_type);
1676 return -ENODEV;
1677 }
1678
c1c4f453 1679 /* Fill out MDIO structure and loopback modes */
4833f02a 1680 mutex_init(&nic_data->mdio_lock);
1681 efx->mdio.mdio_read = falcon_mdio_read;
1682 efx->mdio.mdio_write = falcon_mdio_write;
1683 rc = efx->phy_op->probe(efx);
1684 if (rc != 0)
1685 return rc;
8ceee660 1686
1687 /* Initial assumption */
1688 efx->link_state.speed = 10000;
1689 efx->link_state.fd = true;
1690
8ceee660 1691 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
daeda630 1692 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
04cc8cac 1693 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
8ceee660 1694 else
04cc8cac 1695 efx->wanted_fc = EFX_FC_RX;
1696 if (efx->mdio.mmds & MDIO_DEVS_AN)
1697 efx->wanted_fc |= EFX_FC_AUTO;
1698
1699 /* Allocate buffer for stats */
152b6a62 1700 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
0d19a540 1701 FALCON_MAC_STATS_SIZE, GFP_KERNEL);
1702 if (rc)
1703 return rc;
1704 netif_dbg(efx, probe, efx->net_dev,
1705 "stats buffer at %llx (virt %p phys %llx)\n",
1706 (u64)efx->stats_buffer.dma_addr,
1707 efx->stats_buffer.addr,
1708 (u64)virt_to_phys(efx->stats_buffer.addr));
8fbca791 1709 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
1710
1711 return 0;
1712}
1713
ef2b90ee 1714static void falcon_remove_port(struct efx_nic *efx)
8ceee660 1715{
ff3b00a0 1716 efx->phy_op->remove(efx);
152b6a62 1717 efx_nic_free_buffer(efx, &efx->stats_buffer);
1718}
1719
1720/* Global events are basically PHY events */
1721static bool
1722falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
1723{
1724 struct efx_nic *efx = channel->efx;
cef68bde 1725 struct falcon_nic_data *nic_data = efx->nic_data;
1726
1727 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
1728 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
1729 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
1730 /* Ignored */
1731 return true;
1732
1733 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
1734 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
cef68bde 1735 nic_data->xmac_poll_required = true;
1736 return true;
1737 }
1738
1739 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
1740 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
1741 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
1742 netif_err(efx, rx_err, efx->net_dev,
1743 "channel %d seen global RX_RESET event. Resetting.\n",
1744 channel->channel);
1745
1746 atomic_inc(&efx->rx_reset);
1747 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1748 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1749 return true;
1750 }
1751
1752 return false;
1753}
1754
1755/**************************************************************************
1756 *
1757 * Falcon test code
1758 *
1759 **************************************************************************/
1760
1761static int
1762falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
8c8661e4 1763{
4de92180 1764 struct falcon_nic_data *nic_data = efx->nic_data;
8c8661e4 1765 struct falcon_nvconfig *nvconfig;
ecd0a6f0 1766 struct falcon_spi_device *spi;
1767 void *region;
1768 int rc, magic_num, struct_ver;
1769 __le16 *word, *limit;
1770 u32 csum;
1771
ecd0a6f0 1772 if (falcon_spi_present(&nic_data->spi_flash))
4de92180 1773 spi = &nic_data->spi_flash;
ecd0a6f0 1774 else if (falcon_spi_present(&nic_data->spi_eeprom))
1775 spi = &nic_data->spi_eeprom;
1776 else
1777 return -EINVAL;
1778
0a95f563 1779 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
1780 if (!region)
1781 return -ENOMEM;
3e6c4538 1782 nvconfig = region + FALCON_NVCONFIG_OFFSET;
8c8661e4 1783
4de92180 1784 mutex_lock(&nic_data->spi_lock);
76884835 1785 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
4de92180 1786 mutex_unlock(&nic_data->spi_lock);
8c8661e4 1787 if (rc) {
62776d03 1788 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
ecd0a6f0 1789 falcon_spi_present(&nic_data->spi_flash) ?
4de92180 1790 "flash" : "EEPROM");
1791 rc = -EIO;
1792 goto out;
1793 }
1794
1795 magic_num = le16_to_cpu(nvconfig->board_magic_num);
1796 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
1797
1798 rc = -EINVAL;
3e6c4538 1799 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
1800 netif_err(efx, hw, efx->net_dev,
1801 "NVRAM bad magic 0x%x\n", magic_num);
1802 goto out;
1803 }
1804 if (struct_ver < 2) {
1805 netif_err(efx, hw, efx->net_dev,
1806 "NVRAM has ancient version 0x%x\n", struct_ver);
1807 goto out;
1808 } else if (struct_ver < 4) {
1809 word = &nvconfig->board_magic_num;
1810 limit = (__le16 *) (nvconfig + 1);
1811 } else {
1812 word = region;
0a95f563 1813 limit = region + FALCON_NVCONFIG_END;
1814 }
1815 for (csum = 0; word < limit; ++word)
1816 csum += le16_to_cpu(*word);
1817
1818 if (~csum & 0xffff) {
1819 netif_err(efx, hw, efx->net_dev,
1820 "NVRAM has incorrect checksum\n");
1821 goto out;
1822 }
1823
1824 rc = 0;
1825 if (nvconfig_out)
1826 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
1827
1828 out:
1829 kfree(region);
1830 return rc;
1831}
1832
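/* NVRAM self-test: simply verify that the nvconfig region can be read
 * back with a valid magic number, structure version and checksum.
 */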
1833static int falcon_test_nvram(struct efx_nic *efx)
1834{
1835 return falcon_read_nvram(efx, NULL);
1836}
1837
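/* Registers exercised by the B0 register self-test; the mask in each
 * entry covers the bits that can safely be written and read back.
 */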
86094f7f 1838static const struct efx_farch_register_test falcon_b0_register_tests[] = {
3e6c4538 1839 { FR_AZ_ADR_REGION,
4cddca54 1840 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
3e6c4538 1841 { FR_AZ_RX_CFG,
8c8661e4 1842 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
3e6c4538 1843 { FR_AZ_TX_CFG,
8c8661e4 1844 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1845 { FR_AZ_TX_RESERVED,
8c8661e4 1846 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
3e6c4538 1847 { FR_AB_MAC_CTRL,
8c8661e4 1848 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1849 { FR_AZ_SRM_TX_DC_CFG,
8c8661e4 1850 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1851 { FR_AZ_RX_DC_CFG,
8c8661e4 1852 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1853 { FR_AZ_RX_DC_PF_WM,
8c8661e4 1854 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1855 { FR_BZ_DP_CTRL,
8c8661e4 1856 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1857 { FR_AB_GM_CFG2,
177dfcd8 1858 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1859 { FR_AB_GMF_CFG0,
177dfcd8 1860 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1861 { FR_AB_XM_GLB_CFG,
8c8661e4 1862 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1863 { FR_AB_XM_TX_CFG,
8c8661e4 1864 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1865 { FR_AB_XM_RX_CFG,
8c8661e4 1866 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1867 { FR_AB_XM_RX_PARAM,
8c8661e4 1868 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1869 { FR_AB_XM_FC,
8c8661e4 1870 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1871 { FR_AB_XM_ADR_LO,
8c8661e4 1872 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
3e6c4538 1873 { FR_AB_XX_SD_CTL,
1874 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
1875};
1876
1877static int
1878falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
152b6a62 1879{
1880 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
1881 int rc, rc2;
1882
1883 mutex_lock(&efx->mac_lock);
1884 if (efx->loopback_modes) {
 1885		/* We need the 312 MHz clock from the PHY to test the XMAC
 1886		 * registers, so move into XGMII loopback if available */
1887 if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
1888 efx->loopback_mode = LOOPBACK_XGMII;
1889 else
1890 efx->loopback_mode = __ffs(efx->loopback_modes);
1891 }
1892 __efx_reconfigure_port(efx);
1893 mutex_unlock(&efx->mac_lock);
1894
1895 efx_reset_down(efx, reset_method);
1896
1897 tests->registers =
1898 efx_farch_test_registers(efx, falcon_b0_register_tests,
1899 ARRAY_SIZE(falcon_b0_register_tests))
1900 ? -1 : 1;
1901
1902 rc = falcon_reset_hw(efx, reset_method);
1903 rc2 = efx_reset_up(efx, reset_method, rc == 0);
1904 return rc ? rc : rc2;
1905}
1906
1907/**************************************************************************
1908 *
1909 * Device reset
1910 *
1911 **************************************************************************
1912 */
1913
1914static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1915{
1916 switch (reason) {
1917 case RESET_TYPE_RX_RECOVERY:
1918 case RESET_TYPE_RX_DESC_FETCH:
1919 case RESET_TYPE_TX_DESC_FETCH:
1920 case RESET_TYPE_TX_SKIP:
1921 /* These can occasionally occur due to hardware bugs.
1922 * We try to reset without disrupting the link.
1923 */
1924 return RESET_TYPE_INVISIBLE;
1925 default:
1926 return RESET_TYPE_ALL;
1927 }
1928}
1929
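/* Convert ethtool ETH_RESET_* flags to the nearest Falcon reset type,
 * clearing the flags that the selected reset will satisfy.
 */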
1930static int falcon_map_reset_flags(u32 *flags)
1931{
1932 enum {
1933 FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1934 ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1935 FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1936 FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1937 };
1938
1939 if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1940 *flags &= ~FALCON_RESET_WORLD;
1941 return RESET_TYPE_WORLD;
1942 }
1943
1944 if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1945 *flags &= ~FALCON_RESET_ALL;
1946 return RESET_TYPE_ALL;
1947 }
1948
1949 if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1950 *flags &= ~FALCON_RESET_INVISIBLE;
1951 return RESET_TYPE_INVISIBLE;
1952 }
1953
1954 return -EINVAL;
1955}
1956
1957/* Resets NIC to known state. This routine must be called in process
1958 * context and is allowed to sleep. */
4de92180 1959static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1960{
1961 struct falcon_nic_data *nic_data = efx->nic_data;
1962 efx_oword_t glb_ctl_reg_ker;
1963 int rc;
1964
1965 netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
1966 RESET_TYPE(method));
1967
1968 /* Initiate device reset */
1969 if (method == RESET_TYPE_WORLD) {
1970 rc = pci_save_state(efx->pci_dev);
1971 if (rc) {
1972 netif_err(efx, drv, efx->net_dev,
1973 "failed to backup PCI state of primary "
1974 "function prior to hardware reset\n");
1975 goto fail1;
1976 }
152b6a62 1977 if (efx_nic_is_dual_func(efx)) {
1978 rc = pci_save_state(nic_data->pci_dev2);
1979 if (rc) {
1980 netif_err(efx, drv, efx->net_dev,
1981 "failed to backup PCI state of "
1982 "secondary function prior to "
1983 "hardware reset\n");
1984 goto fail2;
1985 }
1986 }
1987
1988 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
1989 FRF_AB_EXT_PHY_RST_DUR,
1990 FFE_AB_EXT_PHY_RST_DUR_10240US,
1991 FRF_AB_SWRST, 1);
8ceee660 1992 } else {
8ceee660 1993 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
1994 /* exclude PHY from "invisible" reset */
1995 FRF_AB_EXT_PHY_RST_CTL,
1996 method == RESET_TYPE_INVISIBLE,
1997 /* exclude EEPROM/flash and PCIe */
1998 FRF_AB_PCIE_CORE_RST_CTL, 1,
1999 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2000 FRF_AB_PCIE_SD_RST_CTL, 1,
2001 FRF_AB_EE_RST_CTL, 1,
2002 FRF_AB_EXT_PHY_RST_DUR,
2003 FFE_AB_EXT_PHY_RST_DUR_10240US,
2004 FRF_AB_SWRST, 1);
2005 }
12d00cad 2006 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
8ceee660 2007
62776d03 2008 netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
2009 schedule_timeout_uninterruptible(HZ / 20);
2010
2011 /* Restore PCI configuration if needed */
2012 if (method == RESET_TYPE_WORLD) {
2013 if (efx_nic_is_dual_func(efx))
2014 pci_restore_state(nic_data->pci_dev2);
2015 pci_restore_state(efx->pci_dev);
2016 netif_dbg(efx, drv, efx->net_dev,
2017 "successfully restored PCI config\n");
2018 }
2019
2020 /* Assert that reset complete */
12d00cad 2021 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
3e6c4538 2022 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
8ceee660 2023 rc = -ETIMEDOUT;
2024 netif_err(efx, hw, efx->net_dev,
2025 "timed out waiting for hardware reset\n");
1d3c16a8 2026 goto fail3;
8ceee660 2027 }
62776d03 2028 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
2029
2030 return 0;
2031
2032 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
2033fail2:
2034 pci_restore_state(efx->pci_dev);
2035fail1:
1d3c16a8 2036fail3:
2037 return rc;
2038}
2039
2040static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2041{
2042 struct falcon_nic_data *nic_data = efx->nic_data;
2043 int rc;
2044
2045 mutex_lock(&nic_data->spi_lock);
2046 rc = __falcon_reset_hw(efx, method);
2047 mutex_unlock(&nic_data->spi_lock);
2048
2049 return rc;
2050}
2051
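/* Periodic hardware monitoring: check the board sensors, poll the PHY
 * (or loopback) for link changes, and reconfigure the MAC when the
 * link state has changed.
 */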
ef2b90ee 2052static void falcon_monitor(struct efx_nic *efx)
fe75820b 2053{
fdaa9aed 2054 bool link_changed;
2055 int rc;
2056
2057 BUG_ON(!mutex_is_locked(&efx->mac_lock));
2058
2059 rc = falcon_board(efx)->type->monitor(efx);
2060 if (rc) {
2061 netif_err(efx, hw, efx->net_dev,
2062 "Board sensor %s; shutting down PHY\n",
2063 (rc == -ERANGE) ? "reported fault" : "failed");
fe75820b 2064 efx->phy_mode |= PHY_MODE_LOW_POWER;
2065 rc = __efx_reconfigure_port(efx);
2066 WARN_ON(rc);
fe75820b 2067 }
2068
2069 if (LOOPBACK_INTERNAL(efx))
2070 link_changed = falcon_loopback_link_poll(efx);
2071 else
2072 link_changed = efx->phy_op->poll(efx);
2073
2074 if (link_changed) {
2075 falcon_stop_nic_stats(efx);
2076 falcon_deconfigure_mac_wrapper(efx);
2077
8fbca791 2078 falcon_reset_macs(efx);
710b208d 2079 rc = falcon_reconfigure_xmac(efx);
d3245b28 2080 BUG_ON(rc);
2081
2082 falcon_start_nic_stats(efx);
2083
2084 efx_link_status_changed(efx);
2085 }
2086
8fbca791 2087 falcon_poll_xmac(efx);
2088}
2089
2090/* Zeroes out the SRAM contents. This routine must be called in
2091 * process context and is allowed to sleep.
2092 */
2093static int falcon_reset_sram(struct efx_nic *efx)
2094{
2095 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2096 int count;
2097
2098 /* Set the SRAM wake/sleep GPIO appropriately. */
12d00cad 2099 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2100 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2101 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
12d00cad 2102 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2103
2104 /* Initiate SRAM reset */
2105 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2106 FRF_AZ_SRM_INIT_EN, 1,
2107 FRF_AZ_SRM_NB_SZ, 0);
12d00cad 2108 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2109
2110 /* Wait for SRAM reset to complete */
2111 count = 0;
2112 do {
2113 netif_dbg(efx, hw, efx->net_dev,
2114 "waiting for SRAM reset (attempt %d)...\n", count);
2115
2116 /* SRAM reset is slow; expect around 16ms */
2117 schedule_timeout_uninterruptible(HZ / 50);
2118
2119 /* Check for reset complete */
12d00cad 2120 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
3e6c4538 2121 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2122 netif_dbg(efx, hw, efx->net_dev,
2123 "SRAM reset complete\n");
2124
2125 return 0;
2126 }
25985edc 2127 } while (++count < 20); /* wait up to 0.4 sec */
8ceee660 2128
62776d03 2129 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
2130 return -ETIMEDOUT;
2131}
2132
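/* Decode a SPI device type word into size, address length and
 * erase/block parameters; a type of zero just clears the size,
 * leaving the device marked absent.
 */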
4de92180 2133static void falcon_spi_device_init(struct efx_nic *efx,
ecd0a6f0 2134 struct falcon_spi_device *spi_device,
2135 unsigned int device_id, u32 device_type)
2136{
4a5b504d 2137 if (device_type != 0) {
2138 spi_device->device_id = device_id;
2139 spi_device->size =
2140 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2141 spi_device->addr_len =
2142 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2143 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2144 spi_device->addr_len == 1);
2145 spi_device->erase_command =
2146 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2147 spi_device->erase_size =
2148 1 << SPI_DEV_TYPE_FIELD(device_type,
2149 SPI_DEV_TYPE_ERASE_SIZE);
2150 spi_device->block_size =
2151 1 << SPI_DEV_TYPE_FIELD(device_type,
2152 SPI_DEV_TYPE_BLOCK_SIZE);
4a5b504d 2153 } else {
4de92180 2154 spi_device->size = 0;
4a5b504d 2155 }
2156}
2157
2158/* Extract non-volatile configuration */
2159static int falcon_probe_nvconfig(struct efx_nic *efx)
2160{
4de92180 2161 struct falcon_nic_data *nic_data = efx->nic_data;
8ceee660 2162 struct falcon_nvconfig *nvconfig;
2163 int rc;
2164
8ceee660 2165 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2166 if (!nvconfig)
2167 return -ENOMEM;
8ceee660 2168
8c8661e4 2169 rc = falcon_read_nvram(efx, nvconfig);
6c88b0b6 2170 if (rc)
4de92180 2171 goto out;
2172
2173 efx->phy_type = nvconfig->board_v2.port0_phy_type;
2174 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
2175
2176 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2177 falcon_spi_device_init(
2178 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2179 le32_to_cpu(nvconfig->board_v3
2180 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
2181 falcon_spi_device_init(
2182 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2183 le32_to_cpu(nvconfig->board_v3
2184 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
2185 }
2186
8c8661e4 2187 /* Read the MAC addresses */
7e300bc8 2188 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
8c8661e4 2189
2190 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
2191 efx->phy_type, efx->mdio.prtad);
8ceee660 2192
2193 rc = falcon_probe_board(efx,
2194 le16_to_cpu(nvconfig->board_v2.board_revision));
4de92180 2195out:
2196 kfree(nvconfig);
2197 return rc;
2198}
2199
2200static void falcon_dimension_resources(struct efx_nic *efx)
2201{
2202 efx->rx_dc_base = 0x20000;
2203 efx->tx_dc_base = 0x26000;
2204}
2205
2206/* Probe all SPI devices on the NIC */
2207static void falcon_probe_spi_devices(struct efx_nic *efx)
2208{
4de92180 2209 struct falcon_nic_data *nic_data = efx->nic_data;
4a5b504d 2210 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2f7f5730 2211 int boot_dev;
4a5b504d 2212
2213 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2214 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2215 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
4a5b504d 2216
2217 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2218 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2219 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2220 netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
2221 boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
2222 "flash" : "EEPROM");
2223 } else {
2224 /* Disable VPD and set clock dividers to safe
2225 * values for initial programming. */
2226 boot_dev = -1;
2227 netif_dbg(efx, probe, efx->net_dev,
2228 "Booted from internal ASIC settings;"
2229 " setting SPI config\n");
3e6c4538 2230 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2f7f5730 2231 /* 125 MHz / 7 ~= 20 MHz */
3e6c4538 2232 FRF_AB_EE_SF_CLOCK_DIV, 7,
2f7f5730 2233 /* 125 MHz / 63 ~= 2 MHz */
3e6c4538 2234 FRF_AB_EE_EE_CLOCK_DIV, 63);
12d00cad 2235 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2236 }
2237
2238 mutex_init(&nic_data->spi_lock);
2239
3e6c4538 2240 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
4de92180 2241 falcon_spi_device_init(efx, &nic_data->spi_flash,
3e6c4538 2242 FFE_AB_SPI_DEVICE_FLASH,
2f7f5730 2243 default_flash_type);
3e6c4538 2244 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
4de92180 2245 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
3e6c4538 2246 FFE_AB_SPI_DEVICE_EEPROM,
2f7f5730 2247 large_eeprom_type);
2248}
2249
2250static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
2251{
2252 return 0x20000;
2253}
2254
2255static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
2256{
2257 /* Map everything up to and including the RSS indirection table.
2258 * The PCI core takes care of mapping the MSI-X tables.
2259 */
2260 return FR_BZ_RX_INDIRECTION_TBL +
2261 FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
2262}
2263
ef2b90ee 2264static int falcon_probe_nic(struct efx_nic *efx)
2265{
2266 struct falcon_nic_data *nic_data;
e775fb93 2267 struct falcon_board *board;
2268 int rc;
2269
2270 /* Allocate storage for hardware specific data */
2271 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2272 if (!nic_data)
2273 return -ENOMEM;
5daab96d 2274 efx->nic_data = nic_data;
8ceee660 2275
2276 rc = -ENODEV;
2277
86094f7f 2278 if (efx_farch_fpga_ver(efx) != 0) {
2279 netif_err(efx, probe, efx->net_dev,
2280 "Falcon FPGA not supported\n");
8ceee660 2281 goto fail1;
2282 }
2283
2284 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2285 efx_oword_t nic_stat;
2286 struct pci_dev *dev;
2287 u8 pci_rev = efx->pci_dev->revision;
8ceee660 2288
57849460 2289 if ((pci_rev == 0xff) || (pci_rev == 0)) {
2290 netif_err(efx, probe, efx->net_dev,
2291 "Falcon rev A0 not supported\n");
2292 goto fail1;
2293 }
2294 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2295 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
2296 netif_err(efx, probe, efx->net_dev,
2297 "Falcon rev A1 1G not supported\n");
2298 goto fail1;
2299 }
2300 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2301 netif_err(efx, probe, efx->net_dev,
2302 "Falcon rev A1 PCI-X not supported\n");
2303 goto fail1;
2304 }
8ceee660 2305
57849460 2306 dev = pci_dev_get(efx->pci_dev);
2307 while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
2308 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
2309 dev))) {
2310 if (dev->bus == efx->pci_dev->bus &&
2311 dev->devfn == efx->pci_dev->devfn + 1) {
2312 nic_data->pci_dev2 = dev;
2313 break;
2314 }
2315 }
2316 if (!nic_data->pci_dev2) {
2317 netif_err(efx, probe, efx->net_dev,
2318 "failed to find secondary function\n");
2319 rc = -ENODEV;
2320 goto fail2;
2321 }
2322 }
2323
2324 /* Now we can reset the NIC */
4de92180 2325 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
8ceee660 2326 if (rc) {
62776d03 2327 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
2328 goto fail3;
2329 }
2330
2331 /* Allocate memory for INT_KER */
2332 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
2333 GFP_KERNEL);
2334 if (rc)
2335 goto fail4;
2336 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2337
2338 netif_dbg(efx, probe, efx->net_dev,
2339 "INT_KER at %llx (virt %p phys %llx)\n",
2340 (u64)efx->irq_status.dma_addr,
2341 efx->irq_status.addr,
2342 (u64)virt_to_phys(efx->irq_status.addr));
8ceee660 2343
2344 falcon_probe_spi_devices(efx);
2345
2346 /* Read in the non-volatile configuration */
2347 rc = falcon_probe_nvconfig(efx);
2348 if (rc) {
2349 if (rc == -EINVAL)
2350 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
8ceee660 2351 goto fail5;
6c88b0b6 2352 }
8ceee660 2353
2354 efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
2355 EFX_MAX_CHANNELS);
2356 efx->timer_quantum_ns = 4968; /* 621 cycles */
2357
37b5a603 2358 /* Initialise I2C adapter */
2359 board = falcon_board(efx);
2360 board->i2c_adap.owner = THIS_MODULE;
2361 board->i2c_data = falcon_i2c_bit_operations;
2362 board->i2c_data.data = efx;
2363 board->i2c_adap.algo_data = &board->i2c_data;
2364 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2365 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2366 sizeof(board->i2c_adap.name));
2367 rc = i2c_bit_add_bus(&board->i2c_adap);
2368 if (rc)
2369 goto fail5;
2370
44838a44 2371 rc = falcon_board(efx)->type->init(efx);
278c0621 2372 if (rc) {
2373 netif_err(efx, probe, efx->net_dev,
2374 "failed to initialise board\n");
2375 goto fail6;
2376 }
2377
2378 nic_data->stats_disable_count = 1;
2379 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2380 (unsigned long)efx);
2381
2382 return 0;
2383
278c0621 2384 fail6:
bf51a8c5 2385 i2c_del_adapter(&board->i2c_adap);
e775fb93 2386 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
8ceee660 2387 fail5:
152b6a62 2388 efx_nic_free_buffer(efx, &efx->irq_status);
8ceee660 2389 fail4:
2390 fail3:
2391 if (nic_data->pci_dev2) {
2392 pci_dev_put(nic_data->pci_dev2);
2393 nic_data->pci_dev2 = NULL;
2394 }
2395 fail2:
2396 fail1:
2397 kfree(efx->nic_data);
2398 return rc;
2399}
2400
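/* Set up the RX configuration register: descriptor push, user buffer
 * size, XON/XOFF flow-control thresholds and (on B0 and later) hash
 * insertion.
 */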
2401static void falcon_init_rx_cfg(struct efx_nic *efx)
2402{
2403 /* RX control FIFO thresholds (32 entries) */
2404 const unsigned ctrl_xon_thr = 20;
2405 const unsigned ctrl_xoff_thr = 25;
2406 efx_oword_t reg;
2407
12d00cad 2408 efx_reado(efx, &reg, FR_AZ_RX_CFG);
daeda630 2409 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2410 /* Data FIFO size is 5.5K. The RX DMA engine only
2411 * supports scattering for user-mode queues, but will
2412 * split DMA writes at intervals of RX_USR_BUF_SIZE
2413 * (32-byte units) even for kernel-mode queues. We
2414 * set it to be so large that that never happens.
2415 */
2416 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2417 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
85740cdf 2418 (3 * 4096) >> 5);
2419 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
2420 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
2421 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2422 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
56241ceb 2423 } else {
625b4514 2424 /* Data FIFO size is 80K; register fields moved */
2425 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2426 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
85740cdf 2427 EFX_RX_USR_BUF_SIZE >> 5);
2428 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
2429 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
2430 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
2431 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2432 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2433 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2434
2435 /* Enable hash insertion. This is broken for the
2436 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
2437 * IPv4 hashes. */
2438 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
2439 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
2440 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
56241ceb 2441 }
2442 /* Always enable XOFF signal from RX FIFO. We enable
2443 * or disable transmission of pause frames at the MAC. */
2444 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
12d00cad 2445 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2446}
2447
2448/* This call performs hardware-specific global initialisation, such as
2449 * defining the descriptor cache sizes and number of RSS channels.
2450 * It does not set up any buffers, descriptor rings or event queues.
2451 */
2452static int falcon_init_nic(struct efx_nic *efx)
2453{
2454 efx_oword_t temp;
2455 int rc;
2456
2457 /* Use on-chip SRAM */
2458 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2459 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2460 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2461
2462 rc = falcon_reset_sram(efx);
2463 if (rc)
2464 return rc;
2465
2466 /* Clear the parity enables on the TX data fifos as
2467 * they produce false parity errors because of timing issues
2468 */
2469 if (EFX_WORKAROUND_5129(efx)) {
2470 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2471 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2472 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2473 }
2474
8ceee660 2475 if (EFX_WORKAROUND_7244(efx)) {
12d00cad 2476 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2477 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2478 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2479 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2480 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
12d00cad 2481 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
8ceee660 2482 }
8ceee660 2483
3e6c4538 2484 /* XXX This is documented only for Falcon A0/A1 */
2485 /* Setup RX. Wait for descriptor is broken and must
2486 * be disabled. RXDP recovery shouldn't be needed, but is.
2487 */
12d00cad 2488 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
2489 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
2490 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
8ceee660 2491 if (EFX_WORKAROUND_5583(efx))
3e6c4538 2492 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
12d00cad 2493 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
8ceee660 2494
2495 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2496 * descriptors (which is bad).
2497 */
12d00cad 2498 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3e6c4538 2499 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
12d00cad 2500 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
8ceee660 2501
56241ceb 2502 falcon_init_rx_cfg(efx);
8ceee660 2503
daeda630 2504 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2505 /* Set hash key for IPv4 */
2506 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
2507 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
2508
2509 /* Set destination of both TX and RX Flush events */
3e6c4538 2510 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
12d00cad 2511 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
2512 }
2513
86094f7f 2514 efx_farch_init_common(efx);
152b6a62 2515
2516 return 0;
2517}
2518
ef2b90ee 2519static void falcon_remove_nic(struct efx_nic *efx)
2520{
2521 struct falcon_nic_data *nic_data = efx->nic_data;
e775fb93 2522 struct falcon_board *board = falcon_board(efx);
37b5a603 2523
44838a44 2524 board->type->fini(efx);
278c0621 2525
8c870379 2526 /* Remove I2C adapter and clear it in preparation for a retry */
bf51a8c5 2527 i2c_del_adapter(&board->i2c_adap);
e775fb93 2528 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
8ceee660 2529
152b6a62 2530 efx_nic_free_buffer(efx, &efx->irq_status);
8ceee660 2531
4de92180 2532 __falcon_reset_hw(efx, RESET_TYPE_ALL);
2533
2534 /* Release the second function after the reset */
2535 if (nic_data->pci_dev2) {
2536 pci_dev_put(nic_data->pci_dev2);
2537 nic_data->pci_dev2 = NULL;
2538 }
2539
2540 /* Tear down the private nic state */
2541 kfree(efx->nic_data);
2542 efx->nic_data = NULL;
2543}
2544
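/* Fold in the RX no-descriptor drop count and, once a MAC stats DMA
 * has completed, the XMAC statistics.
 */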
ef2b90ee 2545static void falcon_update_nic_stats(struct efx_nic *efx)
8ceee660 2546{
55edc6e6 2547 struct falcon_nic_data *nic_data = efx->nic_data;
2548 efx_oword_t cnt;
2549
2550 if (nic_data->stats_disable_count)
2551 return;
2552
12d00cad 2553 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
2554 efx->n_rx_nodesc_drop_cnt +=
2555 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
2556
2557 if (nic_data->stats_pending &&
2558 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
2559 nic_data->stats_pending = false;
2560 rmb(); /* read the done flag before the stats */
710b208d 2561 falcon_update_stats_xmac(efx);
2562 }
2563}
2564
2565void falcon_start_nic_stats(struct efx_nic *efx)
2566{
2567 struct falcon_nic_data *nic_data = efx->nic_data;
2568
2569 spin_lock_bh(&efx->stats_lock);
2570 if (--nic_data->stats_disable_count == 0)
2571 falcon_stats_request(efx);
2572 spin_unlock_bh(&efx->stats_lock);
2573}
2574
2575void falcon_stop_nic_stats(struct efx_nic *efx)
2576{
2577 struct falcon_nic_data *nic_data = efx->nic_data;
2578 int i;
2579
2580 might_sleep();
2581
2582 spin_lock_bh(&efx->stats_lock);
2583 ++nic_data->stats_disable_count;
2584 spin_unlock_bh(&efx->stats_lock);
2585
2586 del_timer_sync(&nic_data->stats_timer);
2587
2588 /* Wait enough time for the most recent transfer to
2589 * complete. */
2590 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
2591 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
2592 break;
2593 msleep(1);
2594 }
2595
2596 spin_lock_bh(&efx->stats_lock);
2597 falcon_stats_complete(efx);
2598 spin_unlock_bh(&efx->stats_lock);
2599}
2600
2601static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
2602{
2603 falcon_board(efx)->type->set_id_led(efx, mode);
2604}
2605
2606/**************************************************************************
2607 *
2608 * Wake on LAN
2609 *
2610 **************************************************************************
2611 */
2612
2613static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2614{
2615 wol->supported = 0;
2616 wol->wolopts = 0;
2617 memset(&wol->sopass, 0, sizeof(wol->sopass));
2618}
2619
2620static int falcon_set_wol(struct efx_nic *efx, u32 type)
2621{
2622 if (type != 0)
2623 return -EINVAL;
2624 return 0;
2625}
2626
2627/**************************************************************************
2628 *
754c653a 2629 * Revision-dependent attributes used by efx.c and nic.c
2630 *
2631 **************************************************************************
2632 */
2633
6c8c2513 2634const struct efx_nic_type falcon_a1_nic_type = {
b105798f 2635 .mem_map_size = falcon_a1_mem_map_size,
2636 .probe = falcon_probe_nic,
2637 .remove = falcon_remove_nic,
2638 .init = falcon_init_nic,
28e47c49 2639 .dimension_resources = falcon_dimension_resources,
1840667a 2640 .fini = falcon_irq_ack_a1,
ef2b90ee 2641 .monitor = falcon_monitor,
2642 .map_reset_reason = falcon_map_reset_reason,
2643 .map_reset_flags = falcon_map_reset_flags,
2644 .reset = falcon_reset_hw,
2645 .probe_port = falcon_probe_port,
2646 .remove_port = falcon_remove_port,
40641ed9 2647 .handle_global_event = falcon_handle_global_event,
e42c3d85 2648 .fini_dmaq = efx_farch_fini_dmaq,
ef2b90ee 2649 .prepare_flush = falcon_prepare_flush,
d5e8cc6c 2650 .finish_flush = efx_port_dummy_op_void,
2651 .update_stats = falcon_update_nic_stats,
2652 .start_stats = falcon_start_nic_stats,
2653 .stop_stats = falcon_stop_nic_stats,
06629f07 2654 .set_id_led = falcon_set_id_led,
ef2b90ee 2655 .push_irq_moderation = falcon_push_irq_moderation,
d3245b28 2656 .reconfigure_port = falcon_reconfigure_port,
9dd3a13b 2657 .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
2658 .reconfigure_mac = falcon_reconfigure_xmac,
2659 .check_mac_fault = falcon_xmac_check_fault,
2660 .get_wol = falcon_get_wol,
2661 .set_wol = falcon_set_wol,
2662 .resume_wol = efx_port_dummy_op_void,
0aa3fbaa 2663 .test_nvram = falcon_test_nvram,
2664 .irq_enable_master = efx_farch_irq_enable_master,
2665 .irq_test_generate = efx_farch_irq_test_generate,
2666 .irq_disable_non_ev = efx_farch_irq_disable_master,
2667 .irq_handle_msi = efx_farch_msi_interrupt,
2668 .irq_handle_legacy = falcon_legacy_interrupt_a1,
2669 .tx_probe = efx_farch_tx_probe,
2670 .tx_init = efx_farch_tx_init,
2671 .tx_remove = efx_farch_tx_remove,
2672 .tx_write = efx_farch_tx_write,
2673 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2674 .rx_probe = efx_farch_rx_probe,
2675 .rx_init = efx_farch_rx_init,
2676 .rx_remove = efx_farch_rx_remove,
2677 .rx_write = efx_farch_rx_write,
2678 .rx_defer_refill = efx_farch_rx_defer_refill,
2679 .ev_probe = efx_farch_ev_probe,
2680 .ev_init = efx_farch_ev_init,
2681 .ev_fini = efx_farch_ev_fini,
2682 .ev_remove = efx_farch_ev_remove,
2683 .ev_process = efx_farch_ev_process,
2684 .ev_read_ack = efx_farch_ev_read_ack,
2685 .ev_test_generate = efx_farch_ev_test_generate,
b895d73e 2686
2687 /* We don't expose the filter table on Falcon A1 as it is not
2688 * mapped into function 0, but these implementations still
2689 * work with a degenerate case of all tables set to size 0.
2690 */
2691 .filter_table_probe = efx_farch_filter_table_probe,
2692 .filter_table_restore = efx_farch_filter_table_restore,
2693 .filter_table_remove = efx_farch_filter_table_remove,
2694 .filter_insert = efx_farch_filter_insert,
2695 .filter_remove_safe = efx_farch_filter_remove_safe,
2696 .filter_get_safe = efx_farch_filter_get_safe,
2697 .filter_clear_rx = efx_farch_filter_clear_rx,
2698 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2699 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2700 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2701
2702#ifdef CONFIG_SFC_MTD
2703 .mtd_probe = falcon_mtd_probe,
2704 .mtd_rename = falcon_mtd_rename,
2705 .mtd_read = falcon_mtd_read,
2706 .mtd_erase = falcon_mtd_erase,
2707 .mtd_write = falcon_mtd_write,
2708 .mtd_sync = falcon_mtd_sync,
2709#endif
2710
daeda630 2711 .revision = EFX_REV_FALCON_A1,
2712 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
2713 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
2714 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
2715 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
2716 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
6d51d307 2717 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
8ceee660 2718 .rx_buffer_padding = 0x24,
85740cdf 2719 .can_rx_scatter = false,
8ceee660 2720 .max_interrupt_mode = EFX_INT_MODE_MSI,
cc180b69 2721 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
c383b537 2722 .offload_features = NETIF_F_IP_CSUM,
df2cd8af 2723 .mcdi_max_ver = -1,
2724};
2725
6c8c2513 2726const struct efx_nic_type falcon_b0_nic_type = {
b105798f 2727 .mem_map_size = falcon_b0_mem_map_size,
2728 .probe = falcon_probe_nic,
2729 .remove = falcon_remove_nic,
2730 .init = falcon_init_nic,
28e47c49 2731 .dimension_resources = falcon_dimension_resources,
2732 .fini = efx_port_dummy_op_void,
2733 .monitor = falcon_monitor,
2734 .map_reset_reason = falcon_map_reset_reason,
2735 .map_reset_flags = falcon_map_reset_flags,
2736 .reset = falcon_reset_hw,
2737 .probe_port = falcon_probe_port,
2738 .remove_port = falcon_remove_port,
40641ed9 2739 .handle_global_event = falcon_handle_global_event,
e42c3d85 2740 .fini_dmaq = efx_farch_fini_dmaq,
ef2b90ee 2741 .prepare_flush = falcon_prepare_flush,
d5e8cc6c 2742 .finish_flush = efx_port_dummy_op_void,
2743 .update_stats = falcon_update_nic_stats,
2744 .start_stats = falcon_start_nic_stats,
2745 .stop_stats = falcon_stop_nic_stats,
06629f07 2746 .set_id_led = falcon_set_id_led,
ef2b90ee 2747 .push_irq_moderation = falcon_push_irq_moderation,
d3245b28 2748 .reconfigure_port = falcon_reconfigure_port,
9dd3a13b 2749 .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
2750 .reconfigure_mac = falcon_reconfigure_xmac,
2751 .check_mac_fault = falcon_xmac_check_fault,
2752 .get_wol = falcon_get_wol,
2753 .set_wol = falcon_set_wol,
2754 .resume_wol = efx_port_dummy_op_void,
d4f2cecc 2755 .test_chip = falcon_b0_test_chip,
0aa3fbaa 2756 .test_nvram = falcon_test_nvram,
2757 .irq_enable_master = efx_farch_irq_enable_master,
2758 .irq_test_generate = efx_farch_irq_test_generate,
2759 .irq_disable_non_ev = efx_farch_irq_disable_master,
2760 .irq_handle_msi = efx_farch_msi_interrupt,
2761 .irq_handle_legacy = efx_farch_legacy_interrupt,
2762 .tx_probe = efx_farch_tx_probe,
2763 .tx_init = efx_farch_tx_init,
2764 .tx_remove = efx_farch_tx_remove,
2765 .tx_write = efx_farch_tx_write,
2766 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2767 .rx_probe = efx_farch_rx_probe,
2768 .rx_init = efx_farch_rx_init,
2769 .rx_remove = efx_farch_rx_remove,
2770 .rx_write = efx_farch_rx_write,
2771 .rx_defer_refill = efx_farch_rx_defer_refill,
2772 .ev_probe = efx_farch_ev_probe,
2773 .ev_init = efx_farch_ev_init,
2774 .ev_fini = efx_farch_ev_fini,
2775 .ev_remove = efx_farch_ev_remove,
2776 .ev_process = efx_farch_ev_process,
2777 .ev_read_ack = efx_farch_ev_read_ack,
2778 .ev_test_generate = efx_farch_ev_test_generate,
2779 .filter_table_probe = efx_farch_filter_table_probe,
2780 .filter_table_restore = efx_farch_filter_table_restore,
2781 .filter_table_remove = efx_farch_filter_table_remove,
2782 .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
2783 .filter_insert = efx_farch_filter_insert,
2784 .filter_remove_safe = efx_farch_filter_remove_safe,
2785 .filter_get_safe = efx_farch_filter_get_safe,
2786 .filter_clear_rx = efx_farch_filter_clear_rx,
2787 .filter_count_rx_used = efx_farch_filter_count_rx_used,
2788 .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
2789 .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
2790#ifdef CONFIG_RFS_ACCEL
2791 .filter_rfs_insert = efx_farch_filter_rfs_insert,
2792 .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
2793#endif
2794#ifdef CONFIG_SFC_MTD
2795 .mtd_probe = falcon_mtd_probe,
2796 .mtd_rename = falcon_mtd_rename,
2797 .mtd_read = falcon_mtd_read,
2798 .mtd_erase = falcon_mtd_erase,
2799 .mtd_write = falcon_mtd_write,
2800 .mtd_sync = falcon_mtd_sync,
2801#endif
b895d73e 2802
daeda630 2803 .revision = EFX_REV_FALCON_B0,
2804 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
2805 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
2806 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
2807 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
2808 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
6d51d307 2809 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
39c9cf07 2810 .rx_buffer_hash_size = 0x10,
8ceee660 2811 .rx_buffer_padding = 0,
85740cdf 2812 .can_rx_scatter = true,
8ceee660 2813 .max_interrupt_mode = EFX_INT_MODE_MSIX,
cc180b69 2814 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
b4187e42 2815 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
df2cd8af 2816 .mcdi_max_ver = -1,
add72477 2817 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
2818};
2819