]>
Commit | Line | Data |
---|---|---|
8ceee660 | 1 | /**************************************************************************** |
f7a6d2c4 | 2 | * Driver for Solarflare network controllers and boards |
8ceee660 | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
f7a6d2c4 | 4 | * Copyright 2006-2013 Solarflare Communications Inc. |
8ceee660 BH |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms of the GNU General Public License version 2 as published | |
8 | * by the Free Software Foundation, incorporated herein by reference. | |
9 | */ | |
10 | ||
744093c9 BH |
11 | #ifndef EFX_NIC_H |
12 | #define EFX_NIC_H | |
8ceee660 | 13 | |
7c236c43 | 14 | #include <linux/net_tstamp.h> |
5c16a96c | 15 | #include <linux/i2c-algo-bit.h> |
8ceee660 | 16 | #include "net_driver.h" |
177dfcd8 | 17 | #include "efx.h" |
8880f4ec | 18 | #include "mcdi.h" |
8ceee660 | 19 | |
daeda630 BH |
20 | enum { |
21 | EFX_REV_FALCON_A0 = 0, | |
22 | EFX_REV_FALCON_A1 = 1, | |
23 | EFX_REV_FALCON_B0 = 2, | |
8880f4ec | 24 | EFX_REV_SIENA_A0 = 3, |
8127d661 | 25 | EFX_REV_HUNT_A0 = 4, |
8ceee660 BH |
26 | }; |
27 | ||
/* Return the hardware architecture revision (one of the EFX_REV_*
 * values above) recorded in this NIC's efx_nic_type.
 */
static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}
8ceee660 | 32 | |
00aef986 | 33 | u32 efx_farch_fpga_ver(struct efx_nic *efx); |
152b6a62 BH |
34 | |
35 | /* NIC has two interlinked PCI functions for the same port. */ | |
36 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) | |
37 | { | |
38 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; | |
39 | } | |
40 | ||
86094f7f BH |
41 | /* Read the current event from the event queue */ |
42 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | |
43 | unsigned int index) | |
44 | { | |
45 | return ((efx_qword_t *) (channel->eventq.buf.addr)) + | |
46 | (index & channel->eventq_mask); | |
47 | } | |
48 | ||
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	/* Non-zero (true) iff neither dword is the all-ones "cleared" marker */
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
64 | ||
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 * NOTE(review): unlike efx_event(), @index is not masked here —
 * callers are expected to pass an in-range index.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
73 | ||
70b33fb0 EC |
74 | /* Get partner of a TX queue, seen as part of the same net core queue */ |
75 | static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) | |
76 | { | |
77 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | |
78 | return tx_queue - EFX_TXQ_TYPE_OFFLOAD; | |
79 | else | |
80 | return tx_queue + EFX_TXQ_TYPE_OFFLOAD; | |
81 | } | |
82 | ||
/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative.
 *
 * empty_read_count is sampled exactly once (ACCESS_ONCE) since it is
 * presumably updated concurrently by the completion path.  A value of
 * zero means "no valid empty snapshot"; otherwise the stored value is
 * compared with @write_count ignoring the EFX_EMPTY_COUNT_VALID flag bit.
 */
static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
96 | ||
70b33fb0 EC |
/* Decide whether we can use TX PIO, ie. write packet data directly into
 * a buffer on the device.  This can reduce latency at the expense of
 * throughput, so we only do this if both hardware and software TX rings
 * are empty.  This also ensures that only one packet at a time can be
 * using the PIO buffer.
 *
 * Requires a PIO buffer to be assigned to this queue, and both this
 * queue and its partner (the other member of the net core queue pair)
 * to look empty at their current insert_count.
 */
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
	return tx_queue->piobuf &&
	       __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
	       __efx_nic_tx_is_empty(partner, partner->insert_count);
}
110 | ||
86094f7f BH |
/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell.  This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless.  Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 *
 * NOTE: this is not a pure predicate — it also clears
 * empty_read_count, consuming the "queue was empty" snapshot.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);

	tx_queue->empty_read_count = 0;
	/* Push only when exactly one new descriptor was added to an
	 * empty queue */
	return was_empty && tx_queue->write_count - write_count == 1;
}
127 | ||
/* Returns a pointer to the specified descriptor in the RX descriptor
 * queue.  @index is not masked; callers must pass an in-range index.
 */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
134 | ||
c1c4f453 BH |
135 | enum { |
136 | PHY_TYPE_NONE = 0, | |
137 | PHY_TYPE_TXC43128 = 1, | |
138 | PHY_TYPE_88E1111 = 2, | |
139 | PHY_TYPE_SFX7101 = 3, | |
140 | PHY_TYPE_QT2022C2 = 4, | |
141 | PHY_TYPE_PM8358 = 6, | |
142 | PHY_TYPE_SFT9001A = 8, | |
143 | PHY_TYPE_QT2025C = 9, | |
144 | PHY_TYPE_SFT9001B = 10, | |
145 | }; | |
146 | ||
/* Bitmask of the loopback modes (XGMII, XGXS, XAUI) handled by the
 * Falcon XMAC layer */
#define FALCON_XMAC_LOOPBACKS			\
	((1 << LOOPBACK_XGMII) |		\
	 (1 << LOOPBACK_XGXS) |			\
	 (1 << LOOPBACK_XAUI))
151 | ||
5b6262d0 BH |
152 | /* Alignment of PCIe DMA boundaries (4KB) */ |
153 | #define EFX_PAGE_SIZE 4096 | |
154 | /* Size and alignment of buffer table entries (same) */ | |
155 | #define EFX_BUF_SIZE EFX_PAGE_SIZE | |
156 | ||
e4d112e4 EC |
157 | /* NIC-generic software stats */ |
158 | enum { | |
159 | GENERIC_STAT_rx_noskb_drops, | |
160 | GENERIC_STAT_rx_nodesc_trunc, | |
161 | GENERIC_STAT_COUNT | |
162 | }; | |
163 | ||
3759433d | 164 | /** |
44838a44 BH |
165 | * struct falcon_board_type - board operations and type information |
166 | * @id: Board type id, as found in NVRAM | |
3759433d BH |
167 | * @init: Allocate resources and initialise peripheral hardware |
168 | * @init_phy: Do board-specific PHY initialisation | |
44838a44 | 169 | * @fini: Shut down hardware and free resources |
3759433d BH |
170 | * @set_id_led: Set state of identifying LED or revert to automatic function |
171 | * @monitor: Board-specific health check function | |
44838a44 BH |
172 | */ |
173 | struct falcon_board_type { | |
174 | u8 id; | |
44838a44 BH |
175 | int (*init) (struct efx_nic *nic); |
176 | void (*init_phy) (struct efx_nic *efx); | |
177 | void (*fini) (struct efx_nic *nic); | |
178 | void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); | |
179 | int (*monitor) (struct efx_nic *nic); | |
180 | }; | |
181 | ||
/**
 * struct falcon_board - board information
 * @type: Type of board
 * @major: Major rev. ('A', 'B' ...)
 * @minor: Minor rev. (0, 1, ...)
 * @i2c_adap: I2C adapter for on-board peripherals
 * @i2c_data: Data for bit-banging algorithm
 * @hwmon_client: I2C client for hardware monitor
 * @ioexp_client: I2C client for power/port control
 */
struct falcon_board {
	const struct falcon_board_type *type;
	int major;
	int minor;
	struct i2c_adapter i2c_adap;
	struct i2c_algo_bit_data i2c_data;
	struct i2c_client *hwmon_client, *ioexp_client;
};
200 | ||
45a3fd55 BH |
201 | /** |
202 | * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device | |
203 | * @device_id: Controller's id for the device | |
204 | * @size: Size (in bytes) | |
205 | * @addr_len: Number of address bytes in read/write commands | |
206 | * @munge_address: Flag whether addresses should be munged. | |
207 | * Some devices with 9-bit addresses (e.g. AT25040A EEPROM) | |
208 | * use bit 3 of the command byte as address bit A8, rather | |
209 | * than having a two-byte address. If this flag is set, then | |
210 | * commands should be munged in this way. | |
211 | * @erase_command: Erase command (or 0 if sector erase not needed). | |
212 | * @erase_size: Erase sector size (in bytes) | |
213 | * Erase commands affect sectors with this size and alignment. | |
214 | * This must be a power of two. | |
215 | * @block_size: Write block size (in bytes). | |
216 | * Write commands are limited to blocks with this size and alignment. | |
217 | */ | |
218 | struct falcon_spi_device { | |
219 | int device_id; | |
220 | unsigned int size; | |
221 | unsigned int addr_len; | |
222 | unsigned int munge_address:1; | |
223 | u8 erase_command; | |
224 | unsigned int erase_size; | |
225 | unsigned int block_size; | |
226 | }; | |
227 | ||
228 | static inline bool falcon_spi_present(const struct falcon_spi_device *spi) | |
229 | { | |
230 | return spi->size != 0; | |
231 | } | |
232 | ||
/* Falcon hardware statistics, indexing the falcon_nic_data::stats
 * array.  Starts at GENERIC_STAT_COUNT so the generic software stats
 * share the same array.
 */
enum {
	FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
	FALCON_STAT_tx_packets,
	FALCON_STAT_tx_pause,
	FALCON_STAT_tx_control,
	FALCON_STAT_tx_unicast,
	FALCON_STAT_tx_multicast,
	FALCON_STAT_tx_broadcast,
	FALCON_STAT_tx_lt64,
	FALCON_STAT_tx_64,
	FALCON_STAT_tx_65_to_127,
	FALCON_STAT_tx_128_to_255,
	FALCON_STAT_tx_256_to_511,
	FALCON_STAT_tx_512_to_1023,
	FALCON_STAT_tx_1024_to_15xx,
	FALCON_STAT_tx_15xx_to_jumbo,
	FALCON_STAT_tx_gtjumbo,
	FALCON_STAT_tx_non_tcpudp,
	FALCON_STAT_tx_mac_src_error,
	FALCON_STAT_tx_ip_src_error,
	FALCON_STAT_rx_bytes,
	FALCON_STAT_rx_good_bytes,
	FALCON_STAT_rx_bad_bytes,
	FALCON_STAT_rx_packets,
	FALCON_STAT_rx_good,
	FALCON_STAT_rx_bad,
	FALCON_STAT_rx_pause,
	FALCON_STAT_rx_control,
	FALCON_STAT_rx_unicast,
	FALCON_STAT_rx_multicast,
	FALCON_STAT_rx_broadcast,
	FALCON_STAT_rx_lt64,
	FALCON_STAT_rx_64,
	FALCON_STAT_rx_65_to_127,
	FALCON_STAT_rx_128_to_255,
	FALCON_STAT_rx_256_to_511,
	FALCON_STAT_rx_512_to_1023,
	FALCON_STAT_rx_1024_to_15xx,
	FALCON_STAT_rx_15xx_to_jumbo,
	FALCON_STAT_rx_gtjumbo,
	FALCON_STAT_rx_bad_lt64,
	FALCON_STAT_rx_bad_gtjumbo,
	FALCON_STAT_rx_overflow,
	FALCON_STAT_rx_symbol_error,
	FALCON_STAT_rx_align_error,
	FALCON_STAT_rx_length_error,
	FALCON_STAT_rx_internal_error,
	FALCON_STAT_rx_nodesc_drop_cnt,
	FALCON_STAT_COUNT	/* Total size of a Falcon stats array */
};
283 | ||
5c16a96c BH |
284 | /** |
285 | * struct falcon_nic_data - Falcon NIC state | |
8986352a | 286 | * @pci_dev2: Secondary function of Falcon A |
3759433d | 287 | * @board: Board state and functions |
cd0ecc9a | 288 | * @stats: Hardware statistics |
55edc6e6 BH |
289 | * @stats_disable_count: Nest count for disabling statistics fetches |
290 | * @stats_pending: Is there a pending DMA of MAC statistics. | |
291 | * @stats_timer: A timer for regularly fetching MAC statistics. | |
4de92180 BH |
292 | * @spi_flash: SPI flash device |
293 | * @spi_eeprom: SPI EEPROM device | |
294 | * @spi_lock: SPI bus lock | |
4833f02a | 295 | * @mdio_lock: MDIO bus lock |
cef68bde | 296 | * @xmac_poll_required: XMAC link state needs polling |
5c16a96c BH |
297 | */ |
298 | struct falcon_nic_data { | |
299 | struct pci_dev *pci_dev2; | |
3759433d | 300 | struct falcon_board board; |
cd0ecc9a | 301 | u64 stats[FALCON_STAT_COUNT]; |
55edc6e6 BH |
302 | unsigned int stats_disable_count; |
303 | bool stats_pending; | |
304 | struct timer_list stats_timer; | |
ecd0a6f0 BH |
305 | struct falcon_spi_device spi_flash; |
306 | struct falcon_spi_device spi_eeprom; | |
4de92180 | 307 | struct mutex spi_lock; |
4833f02a | 308 | struct mutex mdio_lock; |
cef68bde | 309 | bool xmac_poll_required; |
5c16a96c BH |
310 | }; |
311 | ||
278c0621 BH |
312 | static inline struct falcon_board *falcon_board(struct efx_nic *efx) |
313 | { | |
3759433d BH |
314 | struct falcon_nic_data *data = efx->nic_data; |
315 | return &data->board; | |
278c0621 BH |
316 | } |
317 | ||
/* Siena hardware statistics, indexing the siena_nic_data::stats
 * array.  Starts at GENERIC_STAT_COUNT so the generic software stats
 * share the same array.
 */
enum {
	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT	/* Total size of a Siena stats array */
};
378 | ||
8880f4ec BH |
379 | /** |
380 | * struct siena_nic_data - Siena NIC state | |
2dc313ec | 381 | * @efx: Pointer back to main interface structure |
8880f4ec | 382 | * @wol_filter_id: Wake-on-LAN packet filter id |
cd0ecc9a | 383 | * @stats: Hardware statistics |
bf3d0156 | 384 | * @vf: Array of &struct siena_vf objects |
2dc313ec SS |
385 | * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. |
386 | * @vfdi_status: Common VFDI status page to be dmad to VF address space. | |
387 | * @local_addr_list: List of local addresses. Protected by %local_lock. | |
388 | * @local_page_list: List of DMA addressable pages used to broadcast | |
389 | * %local_addr_list. Protected by %local_lock. | |
390 | * @local_lock: Mutex protecting %local_addr_list and %local_page_list. | |
391 | * @peer_work: Work item to broadcast peer addresses to VMs. | |
8880f4ec BH |
392 | */ |
393 | struct siena_nic_data { | |
2dc313ec | 394 | struct efx_nic *efx; |
8880f4ec | 395 | int wol_filter_id; |
cd0ecc9a | 396 | u64 stats[SIENA_STAT_COUNT]; |
2dc313ec | 397 | #ifdef CONFIG_SFC_SRIOV |
bf3d0156 | 398 | struct siena_vf *vf; |
2dc313ec SS |
399 | struct efx_channel *vfdi_channel; |
400 | unsigned vf_buftbl_base; | |
401 | struct efx_buffer vfdi_status; | |
402 | struct list_head local_addr_list; | |
403 | struct list_head local_page_list; | |
404 | struct mutex local_lock; | |
405 | struct work_struct peer_work; | |
406 | #endif | |
8880f4ec BH |
407 | }; |
408 | ||
/* EF10 hardware statistics, indexing the efx_ef10_nic_data::stats
 * array.  Starts at GENERIC_STAT_COUNT so the generic software stats
 * share the same array.  "port_" stats are per physical port; the
 * remainder are per function.
 */
enum {
	EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
	EF10_STAT_port_tx_packets,
	EF10_STAT_port_tx_pause,
	EF10_STAT_port_tx_control,
	EF10_STAT_port_tx_unicast,
	EF10_STAT_port_tx_multicast,
	EF10_STAT_port_tx_broadcast,
	EF10_STAT_port_tx_lt64,
	EF10_STAT_port_tx_64,
	EF10_STAT_port_tx_65_to_127,
	EF10_STAT_port_tx_128_to_255,
	EF10_STAT_port_tx_256_to_511,
	EF10_STAT_port_tx_512_to_1023,
	EF10_STAT_port_tx_1024_to_15xx,
	EF10_STAT_port_tx_15xx_to_jumbo,
	EF10_STAT_port_rx_bytes,
	EF10_STAT_port_rx_bytes_minus_good_bytes,
	EF10_STAT_port_rx_good_bytes,
	EF10_STAT_port_rx_bad_bytes,
	EF10_STAT_port_rx_packets,
	EF10_STAT_port_rx_good,
	EF10_STAT_port_rx_bad,
	EF10_STAT_port_rx_pause,
	EF10_STAT_port_rx_control,
	EF10_STAT_port_rx_unicast,
	EF10_STAT_port_rx_multicast,
	EF10_STAT_port_rx_broadcast,
	EF10_STAT_port_rx_lt64,
	EF10_STAT_port_rx_64,
	EF10_STAT_port_rx_65_to_127,
	EF10_STAT_port_rx_128_to_255,
	EF10_STAT_port_rx_256_to_511,
	EF10_STAT_port_rx_512_to_1023,
	EF10_STAT_port_rx_1024_to_15xx,
	EF10_STAT_port_rx_15xx_to_jumbo,
	EF10_STAT_port_rx_gtjumbo,
	EF10_STAT_port_rx_bad_gtjumbo,
	EF10_STAT_port_rx_overflow,
	EF10_STAT_port_rx_align_error,
	EF10_STAT_port_rx_length_error,
	EF10_STAT_port_rx_nodesc_drops,
	EF10_STAT_port_rx_pm_trunc_bb_overflow,
	EF10_STAT_port_rx_pm_discard_bb_overflow,
	EF10_STAT_port_rx_pm_trunc_vfifo_full,
	EF10_STAT_port_rx_pm_discard_vfifo_full,
	EF10_STAT_port_rx_pm_trunc_qbb,
	EF10_STAT_port_rx_pm_discard_qbb,
	EF10_STAT_port_rx_pm_discard_mapping,
	EF10_STAT_port_rx_dp_q_disabled_packets,
	EF10_STAT_port_rx_dp_di_dropped_packets,
	EF10_STAT_port_rx_dp_streaming_packets,
	EF10_STAT_port_rx_dp_hlb_fetch,
	EF10_STAT_port_rx_dp_hlb_wait,
	EF10_STAT_rx_unicast,
	EF10_STAT_rx_unicast_bytes,
	EF10_STAT_rx_multicast,
	EF10_STAT_rx_multicast_bytes,
	EF10_STAT_rx_broadcast,
	EF10_STAT_rx_broadcast_bytes,
	EF10_STAT_rx_bad,
	EF10_STAT_rx_bad_bytes,
	EF10_STAT_rx_overflow,
	EF10_STAT_tx_unicast,
	EF10_STAT_tx_unicast_bytes,
	EF10_STAT_tx_multicast,
	EF10_STAT_tx_multicast_bytes,
	EF10_STAT_tx_broadcast,
	EF10_STAT_tx_broadcast_bytes,
	EF10_STAT_tx_bad,
	EF10_STAT_tx_bad_bytes,
	EF10_STAT_tx_overflow,
	EF10_STAT_COUNT		/* Total size of an EF10 stats array */
};
483 | ||
183233be BH |
484 | /* Maximum number of TX PIO buffers we may allocate to a function. |
485 | * This matches the total number of buffers on each SFC9100-family | |
486 | * controller. | |
487 | */ | |
488 | #define EF10_TX_PIOBUF_COUNT 16 | |
489 | ||
8127d661 BH |
490 | /** |
491 | * struct efx_ef10_nic_data - EF10 architecture NIC state | |
492 | * @mcdi_buf: DMA buffer for MCDI | |
493 | * @warm_boot_count: Last seen MC warm boot count | |
494 | * @vi_base: Absolute index of first VI in this function | |
495 | * @n_allocated_vis: Number of VIs allocated to this function | |
496 | * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot | |
497 | * @must_restore_filters: Flag: filters have yet to be restored after MC reboot | |
183233be BH |
498 | * @n_piobufs: Number of PIO buffers allocated to this function |
499 | * @wc_membase: Base address of write-combining mapping of the memory BAR | |
500 | * @pio_write_base: Base address for writing PIO buffers | |
501 | * @pio_write_vi_base: Relative VI number for @pio_write_base | |
502 | * @piobuf_handle: Handle of each PIO buffer allocated | |
503 | * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC | |
504 | * reboot | |
8127d661 | 505 | * @rx_rss_context: Firmware handle for our RSS context |
267c0157 | 506 | * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared |
8127d661 BH |
507 | * @stats: Hardware statistics |
508 | * @workaround_35388: Flag: firmware supports workaround for bug 35388 | |
46e612b0 | 509 | * @workaround_26807: Flag: firmware supports workaround for bug 26807 |
539de7c5 | 510 | * @workaround_61265: Flag: firmware supports workaround for bug 61265 |
a915ccc9 BH |
511 | * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated |
512 | * after MC reboot | |
8127d661 BH |
513 | * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of |
514 | * %MC_CMD_GET_CAPABILITIES response) | |
ca889a05 BK |
515 | * @datapath_caps2: Further Capabilities of datapath firmware (FLAGS2 field of |
516 | * %MC_CMD_GET_CAPABILITIES response) | |
8d9f9dd4 DP |
517 | * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU |
518 | * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU | |
45b2449e | 519 | * @vport_id: The function's vport ID, only relevant for PFs |
6d8aaaf6 | 520 | * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot |
1cd9ecbb | 521 | * @pf_index: The number for this PF, or the parent PF if this is a VF |
3c5eb876 SS |
522 | #ifdef CONFIG_SFC_SRIOV |
523 | * @vf: Pointer to VF data structure | |
524 | #endif | |
34813fe2 AR |
525 | * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero |
526 | * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. | |
527 | * @vlan_lock: Lock to serialize access to vlan_list. | |
8127d661 BH |
528 | */ |
529 | struct efx_ef10_nic_data { | |
530 | struct efx_buffer mcdi_buf; | |
531 | u16 warm_boot_count; | |
532 | unsigned int vi_base; | |
533 | unsigned int n_allocated_vis; | |
534 | bool must_realloc_vis; | |
535 | bool must_restore_filters; | |
183233be BH |
536 | unsigned int n_piobufs; |
537 | void __iomem *wc_membase, *pio_write_base; | |
538 | unsigned int pio_write_vi_base; | |
539 | unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; | |
540 | bool must_restore_piobufs; | |
8127d661 | 541 | u32 rx_rss_context; |
267c0157 | 542 | bool rx_rss_context_exclusive; |
8127d661 BH |
543 | u64 stats[EF10_STAT_COUNT]; |
544 | bool workaround_35388; | |
46e612b0 | 545 | bool workaround_26807; |
539de7c5 | 546 | bool workaround_61265; |
a915ccc9 | 547 | bool must_check_datapath_caps; |
8127d661 | 548 | u32 datapath_caps; |
ca889a05 | 549 | u32 datapath_caps2; |
8d9f9dd4 DP |
550 | unsigned int rx_dpcpu_fw_id; |
551 | unsigned int tx_dpcpu_fw_id; | |
45b2449e | 552 | unsigned int vport_id; |
6d8aaaf6 | 553 | bool must_probe_vswitching; |
1cd9ecbb | 554 | unsigned int pf_index; |
1d051e00 | 555 | u8 port_id[ETH_ALEN]; |
3c5eb876 | 556 | #ifdef CONFIG_SFC_SRIOV |
88a37de6 | 557 | unsigned int vf_index; |
3c5eb876 SS |
558 | struct ef10_vf *vf; |
559 | #endif | |
560 | u8 vport_mac[ETH_ALEN]; | |
34813fe2 AR |
561 | struct list_head vlan_list; |
562 | struct mutex vlan_lock; | |
8127d661 BH |
563 | }; |
564 | ||
00aef986 | 565 | int efx_init_sriov(void); |
00aef986 | 566 | void efx_fini_sriov(void); |
cd2d5b52 | 567 | |
7c236c43 | 568 | struct ethtool_ts_info; |
ac36baf8 BH |
569 | int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); |
570 | void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); | |
571 | void efx_ptp_remove(struct efx_nic *efx); | |
433dc9b3 BH |
572 | int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); |
573 | int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); | |
00aef986 JP |
574 | void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); |
575 | bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); | |
9ec06595 DP |
576 | int efx_ptp_get_mode(struct efx_nic *efx); |
577 | int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, | |
578 | unsigned int new_mode); | |
00aef986 JP |
579 | int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); |
580 | void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); | |
99691c4a BH |
581 | size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); |
582 | size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); | |
bd9a265d JC |
583 | void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); |
584 | void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, | |
585 | struct sk_buff *skb); | |
/* Attach a hardware RX timestamp to @skb, but only while this
 * channel's time-sync events are valid; otherwise do nothing.
 */
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
					       struct sk_buff *skb)
{
	if (channel->sync_events_state == SYNC_EVENTS_VALID)
		__efx_rx_skb_attach_timestamp(channel, skb);
}
2ea4dc28 AR |
592 | void efx_ptp_start_datapath(struct efx_nic *efx); |
593 | void efx_ptp_stop_datapath(struct efx_nic *efx); | |
7c236c43 | 594 | |
6c8c2513 | 595 | extern const struct efx_nic_type falcon_a1_nic_type; |
596 | extern const struct efx_nic_type falcon_b0_nic_type; | |
597 | extern const struct efx_nic_type siena_a0_nic_type; | |
8127d661 | 598 | extern const struct efx_nic_type efx_hunt_a0_nic_type; |
02246a7f | 599 | extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; |
8ceee660 BH |
600 | |
601 | /************************************************************************** | |
602 | * | |
603 | * Externs | |
604 | * | |
605 | ************************************************************************** | |
606 | */ | |
607 | ||
00aef986 | 608 | int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
5087b54d | 609 | |
/* TX data path — thin wrappers dispatching to the per-NIC-type ops */

/* Allocate resources for a TX queue */
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	return tx_queue->efx->type->tx_probe(tx_queue);
}
/* Initialise a TX queue's hardware state */
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_init(tx_queue);
}
/* Free a TX queue's resources */
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_remove(tx_queue);
}
/* Write out pending TX descriptors */
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	tx_queue->efx->type->tx_write(tx_queue);
}
8ceee660 BH |
627 | |
/* RX data path — thin wrappers dispatching to the per-NIC-type ops */

/* Allocate resources for an RX queue */
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	return rx_queue->efx->type->rx_probe(rx_queue);
}
/* Initialise an RX queue's hardware state */
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_init(rx_queue);
}
/* Free an RX queue's resources */
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_remove(rx_queue);
}
/* Notify the NIC of newly added RX descriptors */
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_write(rx_queue);
}
/* Generate an event to trigger a deferred RX refill */
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	rx_queue->efx->type->rx_defer_refill(rx_queue);
}
8ceee660 BH |
649 | |
/* Event data path — thin wrappers dispatching to the per-NIC-type ops */

/* Allocate resources for an event queue */
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_probe(channel);
}
/* Initialise an event queue's hardware state */
static inline int efx_nic_init_eventq(struct efx_channel *channel)
{
	return channel->efx->type->ev_init(channel);
}
/* Shut down an event queue's hardware state */
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_fini(channel);
}
/* Free an event queue's resources */
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
	channel->efx->type->ev_remove(channel);
}
/* Process up to @quota events; returns number processed (per op contract) */
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
	return channel->efx->type->ev_process(channel, quota);
}
/* Acknowledge processed events to the NIC */
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	channel->efx->type->ev_read_ack(channel);
}
00aef986 | 676 | void efx_nic_event_test_start(struct efx_channel *channel); |
86094f7f BH |
677 | |
678 | /* Falcon/Siena queue operations */ | |
00aef986 JP |
679 | int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); |
680 | void efx_farch_tx_init(struct efx_tx_queue *tx_queue); | |
681 | void efx_farch_tx_fini(struct efx_tx_queue *tx_queue); | |
682 | void efx_farch_tx_remove(struct efx_tx_queue *tx_queue); | |
683 | void efx_farch_tx_write(struct efx_tx_queue *tx_queue); | |
684 | int efx_farch_rx_probe(struct efx_rx_queue *rx_queue); | |
685 | void efx_farch_rx_init(struct efx_rx_queue *rx_queue); | |
686 | void efx_farch_rx_fini(struct efx_rx_queue *rx_queue); | |
687 | void efx_farch_rx_remove(struct efx_rx_queue *rx_queue); | |
688 | void efx_farch_rx_write(struct efx_rx_queue *rx_queue); | |
689 | void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue); | |
690 | int efx_farch_ev_probe(struct efx_channel *channel); | |
691 | int efx_farch_ev_init(struct efx_channel *channel); | |
692 | void efx_farch_ev_fini(struct efx_channel *channel); | |
693 | void efx_farch_ev_remove(struct efx_channel *channel); | |
694 | int efx_farch_ev_process(struct efx_channel *channel, int quota); | |
695 | void efx_farch_ev_read_ack(struct efx_channel *channel); | |
696 | void efx_farch_ev_test_generate(struct efx_channel *channel); | |
86094f7f | 697 | |
add72477 | 698 | /* Falcon/Siena filter operations */ |
00aef986 JP |
699 | int efx_farch_filter_table_probe(struct efx_nic *efx); |
700 | void efx_farch_filter_table_restore(struct efx_nic *efx); | |
701 | void efx_farch_filter_table_remove(struct efx_nic *efx); | |
702 | void efx_farch_filter_update_rx_scatter(struct efx_nic *efx); | |
703 | s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, | |
704 | bool replace); | |
705 | int efx_farch_filter_remove_safe(struct efx_nic *efx, | |
706 | enum efx_filter_priority priority, | |
707 | u32 filter_id); | |
708 | int efx_farch_filter_get_safe(struct efx_nic *efx, | |
709 | enum efx_filter_priority priority, u32 filter_id, | |
710 | struct efx_filter_spec *); | |
fbd79120 BH |
711 | int efx_farch_filter_clear_rx(struct efx_nic *efx, |
712 | enum efx_filter_priority priority); | |
00aef986 JP |
713 | u32 efx_farch_filter_count_rx_used(struct efx_nic *efx, |
714 | enum efx_filter_priority priority); | |
715 | u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx); | |
716 | s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx, | |
717 | enum efx_filter_priority priority, u32 *buf, | |
718 | u32 size); | |
add72477 | 719 | #ifdef CONFIG_RFS_ACCEL |
00aef986 JP |
720 | s32 efx_farch_filter_rfs_insert(struct efx_nic *efx, |
721 | struct efx_filter_spec *spec); | |
722 | bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, | |
723 | unsigned int index); | |
add72477 | 724 | #endif |
00aef986 | 725 | void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); |
add72477 | 726 | |
00aef986 | 727 | bool efx_nic_event_present(struct efx_channel *channel); |
8ceee660 | 728 | |
b7f514af BH |
729 | /* Some statistics are computed as A - B where A and B each increase |
730 | * linearly with some hardware counter(s) and the counters are read | |
731 | * asynchronously. If the counters contributing to B are always read | |
732 | * after those contributing to A, the computed value may be lower than | |
733 | * the true value by some variable amount, and may decrease between | |
734 | * subsequent computations. | |
735 | * | |
736 | * We should never allow statistics to decrease or to exceed the true | |
737 | * value. Since the computed value will never be greater than the | |
738 | * true value, we can achieve this by only storing the computed value | |
739 | * when it increases. | |
740 | */ | |
741 | static inline void efx_update_diff_stat(u64 *stat, u64 diff) | |
742 | { | |
743 | if ((s64)(diff - *stat) > 0) | |
744 | *stat = diff; | |
745 | } | |
746 | ||
86094f7f | 747 | /* Interrupts */ |
00aef986 | 748 | int efx_nic_init_interrupt(struct efx_nic *efx); |
942e298e | 749 | int efx_nic_irq_test_start(struct efx_nic *efx); |
00aef986 | 750 | void efx_nic_fini_interrupt(struct efx_nic *efx); |
86094f7f BH |
751 | |
752 | /* Falcon/Siena interrupts */ | |
00aef986 | 753 | void efx_farch_irq_enable_master(struct efx_nic *efx); |
942e298e | 754 | int efx_farch_irq_test_generate(struct efx_nic *efx); |
00aef986 JP |
755 | void efx_farch_irq_disable_master(struct efx_nic *efx); |
756 | irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); | |
757 | irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); | |
758 | irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); | |
152b6a62 | 759 | |
eee6f6a9 BH |
760 | static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) |
761 | { | |
dd40781e | 762 | return ACCESS_ONCE(channel->event_test_cpu); |
eee6f6a9 BH |
763 | } |
764 | static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) | |
765 | { | |
766 | return ACCESS_ONCE(efx->last_irq_cpu); | |
767 | } | |
768 | ||
8ceee660 | 769 | /* Global Resources */ |
00aef986 JP |
770 | int efx_nic_flush_queues(struct efx_nic *efx); |
771 | void siena_prepare_flush(struct efx_nic *efx); | |
772 | int efx_farch_fini_dmaq(struct efx_nic *efx); | |
e283546c | 773 | void efx_farch_finish_flr(struct efx_nic *efx); |
00aef986 JP |
774 | void siena_finish_flush(struct efx_nic *efx); |
775 | void falcon_start_nic_stats(struct efx_nic *efx); | |
776 | void falcon_stop_nic_stats(struct efx_nic *efx); | |
777 | int falcon_reset_xaui(struct efx_nic *efx); | |
778 | void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); | |
779 | void efx_farch_init_common(struct efx_nic *efx); | |
780 | void efx_ef10_handle_drain_event(struct efx_nic *efx); | |
00aef986 | 781 | void efx_farch_rx_push_indir_table(struct efx_nic *efx); |
152b6a62 BH |
782 | |
783 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | |
0d19a540 | 784 | unsigned int len, gfp_t gfp_flags); |
152b6a62 | 785 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); |
8ceee660 | 786 | |
8c8661e4 | 787 | /* Tests */ |
/* Description of one hardware register exercised by the register
 * self-test; an array of these is passed to efx_farch_test_registers().
 */
struct efx_farch_register_test {
	unsigned address;	/* register address to test */
	efx_oword_t mask;	/* bits covered by the test — presumably the
				 * writable bits; confirm against
				 * efx_farch_test_registers() */
};
00aef986 JP |
792 | int efx_farch_test_registers(struct efx_nic *efx, |
793 | const struct efx_farch_register_test *regs, | |
794 | size_t n_regs); | |
8c8661e4 | 795 | |
00aef986 JP |
796 | size_t efx_nic_get_regs_len(struct efx_nic *efx); |
797 | void efx_nic_get_regs(struct efx_nic *efx, void *buf); | |
5b98c1bf | 798 | |
00aef986 JP |
799 | size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, |
800 | const unsigned long *mask, u8 *names); | |
801 | void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, | |
802 | const unsigned long *mask, u64 *stats, | |
803 | const void *dma_buf, bool accumulate); | |
f8f3b5ae | 804 | void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); |
cd0ecc9a | 805 | |
ab0115fc | 806 | #define EFX_MAX_FLUSH_TIME 5000 |
8ceee660 | 807 | |
00aef986 JP |
808 | void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, |
809 | efx_qword_t *event); | |
8ceee660 | 810 | |
744093c9 | 811 | #endif /* EFX_NIC_H */ |