]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
be2net: Call version 2 of GET_STATS ioctl for Skyhawk-R
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
c7bb15a6 2 * Copyright (C) 2005 - 2013 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6b7c5b94
SP
25
26MODULE_VERSION(DRV_VER);
27MODULE_DEVICE_TABLE(pci, be_dev_ids);
28MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 29MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
30MODULE_LICENSE("GPL");
31
ba343c77 32static unsigned int num_vfs;
ba343c77 33module_param(num_vfs, uint, S_IRUGO);
ba343c77 34MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 35
11ac75ed
SP
36static ushort rx_frag_size = 2048;
37module_param(rx_frag_size, ushort, S_IRUGO);
38MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
39
/* PCI device IDs claimed by this driver; the matching MODULE_DEVICE_TABLE
 * alias lets userspace (modprobe) auto-load the module for these devices.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names indexed by bit position in the UE status low
 * register; presumably used when reporting unrecoverable errors — the
 * consumer is outside this chunk.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable names indexed by bit position in the UE status high
 * register; trailing entries are unassigned bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 122
752961a1
SP
123/* Is BE in a multi-channel mode */
124static inline bool be_is_mc(struct be_adapter *adapter) {
125 return (adapter->function_mode & FLEX10_MODE ||
126 adapter->function_mode & VNIC_MODE ||
127 adapter->function_mode & UMC_ENABLED);
128}
129
6b7c5b94
SP
130static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 133 if (mem->va) {
2b7bcebf
IV
134 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
135 mem->dma);
1cfafab9
SP
136 mem->va = NULL;
137 }
6b7c5b94
SP
138}
139
140static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
141 u16 len, u16 entry_size)
142{
143 struct be_dma_mem *mem = &q->dma_mem;
144
145 memset(q, 0, sizeof(*q));
146 q->len = len;
147 q->entry_size = entry_size;
148 mem->size = len * entry_size;
ede23fa8
JP
149 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
150 GFP_KERNEL);
6b7c5b94 151 if (!mem->va)
10ef9ab4 152 return -ENOMEM;
6b7c5b94
SP
153 return 0;
154}
155
68c45a2d 156static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 157{
db3ea781 158 u32 reg, enabled;
5f0b849e 159
db3ea781
SP
160 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
161 &reg);
162 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163
5f0b849e 164 if (!enabled && enable)
6b7c5b94 165 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 166 else if (enabled && !enable)
6b7c5b94 167 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 168 else
6b7c5b94 169 return;
5f0b849e 170
db3ea781
SP
171 pci_write_config_dword(adapter->pdev,
172 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
173}
174
68c45a2d
SK
175static void be_intr_set(struct be_adapter *adapter, bool enable)
176{
177 int status = 0;
178
179 /* On lancer interrupts can't be controlled via this register */
180 if (lancer_chip(adapter))
181 return;
182
183 if (adapter->eeh_error)
184 return;
185
186 status = be_cmd_intr_set(adapter, enable);
187 if (status)
188 be_reg_intr_set(adapter, enable);
189}
190
8788fdc2 191static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
192{
193 u32 val = 0;
194 val |= qid & DB_RQ_RING_ID_MASK;
195 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
196
197 wmb();
8788fdc2 198 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
199}
200
94d73aaa
VV
201static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
202 u16 posted)
6b7c5b94
SP
203{
204 u32 val = 0;
94d73aaa 205 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 206 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
207
208 wmb();
94d73aaa 209 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
210}
211
8788fdc2 212static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
213 bool arm, bool clear_int, u16 num_popped)
214{
215 u32 val = 0;
216 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
218 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 219
f67ef7ba 220 if (adapter->eeh_error)
cf588477
SP
221 return;
222
6b7c5b94
SP
223 if (arm)
224 val |= 1 << DB_EQ_REARM_SHIFT;
225 if (clear_int)
226 val |= 1 << DB_EQ_CLR_SHIFT;
227 val |= 1 << DB_EQ_EVNT_SHIFT;
228 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 229 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
230}
231
8788fdc2 232void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
233{
234 u32 val = 0;
235 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 238
f67ef7ba 239 if (adapter->eeh_error)
cf588477
SP
240 return;
241
6b7c5b94
SP
242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
246}
247
6b7c5b94
SP
/* ndo_set_mac_address handler.
 * Programs the new MAC via the PMAC_ADD FW command, deletes the old PMAC,
 * then confirms the change took effect by reading the active MAC back from
 * the FW before committing it to netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM if
 * the FW did not activate the new MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
301
ca34fe38
SP
/* BE2 supports only v0 cmd */
/* Return a pointer to the hw_stats section of the GET_STATS response
 * buffer; the response layout depends on the cmd version used per chip
 * (v0 for BE2, v1 for BE3, v2 for everything else).
 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}
319
/* BE2 supports only v0 cmd */
/* Return a pointer to the erx section of the version-specific hw_stats
 * structure (see hw_stats_from_cmd for the per-chip version choice).
 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
337
/* Copy the v0 (BE2) GET_STATS response counters for this port into
 * adapter->drv_stats, converting from FW little-endian first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counts per physical port in the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
386
ca34fe38 387static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 388{
ac124ff9
SP
389 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
390 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
391 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 392 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
393 &rxf_stats->port[adapter->port_num];
394 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 395
ac124ff9 396 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
397 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
398 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
399 drvs->rx_pause_frames = port_stats->rx_pause_frames;
400 drvs->rx_crc_errors = port_stats->rx_crc_errors;
401 drvs->rx_control_frames = port_stats->rx_control_frames;
402 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
403 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
404 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
405 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
406 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
407 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
408 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
409 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
410 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
411 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
412 drvs->rx_dropped_header_too_small =
413 port_stats->rx_dropped_header_too_small;
414 drvs->rx_input_fifo_overflow_drop =
415 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 416 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
417 drvs->rx_alignment_symbol_errors =
418 port_stats->rx_alignment_symbol_errors;
ac124ff9 419 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
420 drvs->tx_pauseframes = port_stats->tx_pauseframes;
421 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 422 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
423 drvs->jabber_events = port_stats->jabber_events;
424 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 425 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
426 drvs->forwarded_packets = rxf_stats->forwarded_packets;
427 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
428 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
429 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
430 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
431}
432
61000861
AK
/* Copy the v2 GET_STATS response counters (chips newer than BE3, e.g.
 * Skyhawk-R) for this port into adapter->drv_stats, converting from FW
 * little-endian first.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
478
005d5696
SX
/* Copy Lancer pport stats into adapter->drv_stats, converting from FW
 * little-endian first. Lancer uses a different (pport) stats layout from
 * the BEx GET_STATS responses; only the low 32 bits of the 64-bit
 * counters (the *_lo fields) are folded into the 32-bit driver stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan filtering are reported separately; fold them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 517
09c1c68f
SP
518static void accumulate_16bit_val(u32 *acc, u16 val)
519{
520#define lo(x) (x & 0xFFFF)
521#define hi(x) (x & 0xFFFF0000)
522 bool wrapped = val < lo(*acc);
523 u32 newacc = hi(*acc) + val;
524
525 if (wrapped)
526 newacc += 65536;
527 ACCESS_ONCE(*acc) = newacc;
528}
529
4188e7df 530static void populate_erx_stats(struct be_adapter *adapter,
a6c578ef
AK
531 struct be_rx_obj *rxo,
532 u32 erx_stat)
533{
534 if (!BEx_chip(adapter))
535 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
536 else
537 /* below erx HW counter can actually wrap around after
538 * 65535. Driver accumulates a 32-bit value
539 */
540 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
541 (u16)erx_stat);
542}
543
89a88ab8
AK
/* Parse the FW stats response into adapter->drv_stats and the per-RXQ
 * stats, dispatching on chip type (Lancer vs BE2/BE3/newer).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
569
ab1594e9
SP
/* ndo_get_stats64 handler: aggregate per-queue RX/TX packet and byte
 * counts (read consistently via the u64_stats seqcount) and derive the
 * error totals from the FW-populated drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
635
b236916a 636void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 637{
6b7c5b94
SP
638 struct net_device *netdev = adapter->netdev;
639
b236916a 640 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 641 netif_carrier_off(netdev);
b236916a 642 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 643 }
b236916a
AK
644
645 if ((link_status & LINK_STATUS_MASK) == LINK_UP)
646 netif_carrier_on(netdev);
647 else
648 netif_carrier_off(netdev);
6b7c5b94
SP
649}
650
3c8def97 651static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 652 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 653{
3c8def97
SP
654 struct be_tx_stats *stats = tx_stats(txo);
655
ab1594e9 656 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
657 stats->tx_reqs++;
658 stats->tx_wrbs += wrb_cnt;
659 stats->tx_bytes += copied;
660 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 661 if (stopped)
ac124ff9 662 stats->tx_stops++;
ab1594e9 663 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
664}
665
666/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
667static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
668 bool *dummy)
6b7c5b94 669{
ebc8d2ab
DM
670 int cnt = (skb->len > skb->data_len);
671
672 cnt += skb_shinfo(skb)->nr_frags;
673
6b7c5b94
SP
674 /* to account for hdr wrb */
675 cnt++;
fe6d2a38
SP
676 if (lancer_chip(adapter) || !(cnt & 1)) {
677 *dummy = false;
678 } else {
6b7c5b94
SP
679 /* add a dummy to make it an even num */
680 cnt++;
681 *dummy = true;
fe6d2a38 682 }
6b7c5b94
SP
683 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
684 return cnt;
685}
686
687static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
688{
689 wrb->frag_pa_hi = upper_32_bits(addr);
690 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
691 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 692 wrb->rsvd0 = 0;
6b7c5b94
SP
693}
694
1ded132d
AK
695static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
696 struct sk_buff *skb)
697{
698 u8 vlan_prio;
699 u16 vlan_tag;
700
701 vlan_tag = vlan_tx_tag_get(skb);
702 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
703 /* If vlan priority provided by OS is NOT in available bmap */
704 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
705 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
706 adapter->recommended_prio;
707
708 return vlan_tag;
709}
710
cc4ce020 711static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
bc0c3405 712 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
6b7c5b94 713{
1ded132d 714 u16 vlan_tag;
cc4ce020 715
6b7c5b94
SP
716 memset(hdr, 0, sizeof(*hdr));
717
718 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
719
49e4b847 720 if (skb_is_gso(skb)) {
6b7c5b94
SP
721 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
722 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
723 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 724 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 725 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94
SP
726 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
727 if (is_tcp_pkt(skb))
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
729 else if (is_udp_pkt(skb))
730 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
731 }
732
4c5102f9 733 if (vlan_tx_tag_present(skb)) {
6b7c5b94 734 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 735 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 736 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
737 }
738
bc0c3405
AK
739 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
742 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
743 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
744}
745
2b7bcebf 746static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
747 bool unmap_single)
748{
749 dma_addr_t dma;
750
751 be_dws_le_to_cpu(wrb, sizeof(*wrb));
752
753 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 754 if (wrb->frag_len) {
7101e111 755 if (unmap_single)
2b7bcebf
IV
756 dma_unmap_single(dev, dma, wrb->frag_len,
757 DMA_TO_DEVICE);
7101e111 758 else
2b7bcebf 759 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
760 }
761}
6b7c5b94 762
3c8def97 763static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
bc0c3405
AK
764 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
765 bool skip_hw_vlan)
6b7c5b94 766{
7101e111
SP
767 dma_addr_t busaddr;
768 int i, copied = 0;
2b7bcebf 769 struct device *dev = &adapter->pdev->dev;
6b7c5b94 770 struct sk_buff *first_skb = skb;
6b7c5b94
SP
771 struct be_eth_wrb *wrb;
772 struct be_eth_hdr_wrb *hdr;
7101e111
SP
773 bool map_single = false;
774 u16 map_head;
6b7c5b94 775
6b7c5b94
SP
776 hdr = queue_head_node(txq);
777 queue_head_inc(txq);
7101e111 778 map_head = txq->head;
6b7c5b94 779
ebc8d2ab 780 if (skb->len > skb->data_len) {
e743d313 781 int len = skb_headlen(skb);
2b7bcebf
IV
782 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
783 if (dma_mapping_error(dev, busaddr))
7101e111
SP
784 goto dma_err;
785 map_single = true;
ebc8d2ab
DM
786 wrb = queue_head_node(txq);
787 wrb_fill(wrb, busaddr, len);
788 be_dws_cpu_to_le(wrb, sizeof(*wrb));
789 queue_head_inc(txq);
790 copied += len;
791 }
6b7c5b94 792
ebc8d2ab 793 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9e903e08 794 const struct skb_frag_struct *frag =
ebc8d2ab 795 &skb_shinfo(skb)->frags[i];
b061b39e 796 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 797 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 798 if (dma_mapping_error(dev, busaddr))
7101e111 799 goto dma_err;
ebc8d2ab 800 wrb = queue_head_node(txq);
9e903e08 801 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
802 be_dws_cpu_to_le(wrb, sizeof(*wrb));
803 queue_head_inc(txq);
9e903e08 804 copied += skb_frag_size(frag);
6b7c5b94
SP
805 }
806
807 if (dummy_wrb) {
808 wrb = queue_head_node(txq);
809 wrb_fill(wrb, 0, 0);
810 be_dws_cpu_to_le(wrb, sizeof(*wrb));
811 queue_head_inc(txq);
812 }
813
bc0c3405 814 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
815 be_dws_cpu_to_le(hdr, sizeof(*hdr));
816
817 return copied;
7101e111
SP
818dma_err:
819 txq->head = map_head;
820 while (copied) {
821 wrb = queue_head_node(txq);
2b7bcebf 822 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
823 map_single = false;
824 copied -= wrb->frag_len;
825 queue_head_inc(txq);
826 }
827 return 0;
6b7c5b94
SP
828}
829
93040ae5 830static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405
AK
831 struct sk_buff *skb,
832 bool *skip_hw_vlan)
93040ae5
SK
833{
834 u16 vlan_tag = 0;
835
836 skb = skb_share_check(skb, GFP_ATOMIC);
837 if (unlikely(!skb))
838 return skb;
839
efee8e87 840 if (vlan_tx_tag_present(skb))
93040ae5 841 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
842
843 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
844 if (!vlan_tag)
845 vlan_tag = adapter->pvid;
846 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
847 * skip VLAN insertion
848 */
849 if (skip_hw_vlan)
850 *skip_hw_vlan = true;
851 }
bc0c3405
AK
852
853 if (vlan_tag) {
58717686 854 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
855 if (unlikely(!skb))
856 return skb;
bc0c3405
AK
857 skb->vlan_tci = 0;
858 }
859
860 /* Insert the outer VLAN, if any */
861 if (adapter->qnq_vid) {
862 vlan_tag = adapter->qnq_vid;
58717686 863 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
bc0c3405
AK
864 if (unlikely(!skb))
865 return skb;
866 if (skip_hw_vlan)
867 *skip_hw_vlan = true;
868 }
869
93040ae5
SK
870 return skb;
871}
872
bc0c3405
AK
873static bool be_ipv6_exthdr_check(struct sk_buff *skb)
874{
875 struct ethhdr *eh = (struct ethhdr *)skb->data;
876 u16 offset = ETH_HLEN;
877
878 if (eh->h_proto == htons(ETH_P_IPV6)) {
879 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
880
881 offset += sizeof(struct ipv6hdr);
882 if (ip6h->nexthdr != NEXTHDR_TCP &&
883 ip6h->nexthdr != NEXTHDR_UDP) {
884 struct ipv6_opt_hdr *ehdr =
885 (struct ipv6_opt_hdr *) (skb->data + offset);
886
887 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
888 if (ehdr->hdrlen == 0xff)
889 return true;
890 }
891 }
892 return false;
893}
894
895static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
896{
897 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
898}
899
ee9c799c
SP
900static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
901 struct sk_buff *skb)
bc0c3405 902{
ee9c799c 903 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
904}
905
ee9c799c
SP
/* Apply chip-specific TX workarounds before the skb is queued.
 * Returns the (possibly replaced) skb, or NULL when the skb had to be
 * dropped; sets *skip_hw_vlan when HW VLAN insertion must be suppressed.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the pad away so the frame length matches tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
982
/* .ndo_start_xmit handler: apply TX workarounds, build the WRBs for the
 * skb and ring the TX doorbell. Always consumes the skb and returns
 * NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround path already freed the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs: roll the queue head
		 * back and drop the skb
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1031
1032static int be_change_mtu(struct net_device *netdev, int new_mtu)
1033{
1034 struct be_adapter *adapter = netdev_priv(netdev);
1035 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
1036 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1037 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
1038 dev_info(&adapter->pdev->dev,
1039 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
1040 BE_MIN_MTU,
1041 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1042 return -EINVAL;
1043 }
1044 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1045 netdev->mtu, new_mtu);
1046 netdev->mtu = new_mtu;
1047 return 0;
1048}
1049
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Returns 0 on success or a FW/command error code.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programming succeeded; if we had fallen back to
		 * VLAN promisc earlier, re-enable HW filtering now
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
				dev_info(&adapter->pdev->dev,
					 "Re-Enabling HW VLAN filtering\n");
			}
		}
	}

	return status;

set_vlan_promisc:
	dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1111
80d5c368 1112static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1113{
1114 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1115 int status = 0;
6b7c5b94 1116
ba343c77 1117
a85e9986
PR
1118 /* Packets with VID 0 are always received by Lancer by default */
1119 if (lancer_chip(adapter) && vid == 0)
1120 goto ret;
1121
6b7c5b94 1122 adapter->vlan_tag[vid] = 1;
92bf14ab 1123 if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
10329df8 1124 status = be_vid_config(adapter);
8e586137 1125
80817cbf
AK
1126 if (!status)
1127 adapter->vlans_added++;
1128 else
1129 adapter->vlan_tag[vid] = 0;
1130ret:
1131 return status;
6b7c5b94
SP
1132}
1133
80d5c368 1134static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1135{
1136 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1137 int status = 0;
6b7c5b94 1138
a85e9986
PR
1139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 goto ret;
1142
6b7c5b94 1143 adapter->vlan_tag[vid] = 0;
92bf14ab 1144 if (adapter->vlans_added <= be_max_vlans(adapter))
10329df8 1145 status = be_vid_config(adapter);
8e586137 1146
80817cbf
AK
1147 if (!status)
1148 adapter->vlans_added--;
1149 else
1150 adapter->vlan_tag[vid] = 1;
1151ret:
1152 return status;
6b7c5b94
SP
1153}
1154
/* .ndo_set_rx_mode handler: program promiscuous, multicast and unicast
 * filters to match the current netdev state.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		/* re-program VLAN filters that were idle while promiscuous */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* flush all previously programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1216
ba343c77
SB
/* .ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		/* BEx: replace the VF's pmac entry via delete + add */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* newer chips: a single command sets the MAC */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		/* cache the MAC so be_get_vf_config() can report it */
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}
1248
64600ea5
AK
1249static int be_get_vf_config(struct net_device *netdev, int vf,
1250 struct ifla_vf_info *vi)
1251{
1252 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1253 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1254
11ac75ed 1255 if (!sriov_enabled(adapter))
64600ea5
AK
1256 return -EPERM;
1257
11ac75ed 1258 if (vf >= adapter->num_vfs)
64600ea5
AK
1259 return -EINVAL;
1260
1261 vi->vf = vf;
11ac75ed 1262 vi->tx_rate = vf_cfg->tx_rate;
a60b3a13
AK
1263 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1264 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1265 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1266
1267 return 0;
1268}
1269
1da87b7f
AK
/* .ndo_set_vf_vlan handler: program transparent VLAN tagging for VF @vf.
 * vlan == 0 && qos == 0 resets tagging back to the VF's default vid.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* fold the priority bits into the tag FW expects */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			vf_cfg->vlan_tag = vlan;
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		vf_cfg->vlan_tag = 0;
		vlan = vf_cfg->def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       vf_cfg->if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
1305
e1d18735
AK
1306static int be_set_vf_tx_rate(struct net_device *netdev,
1307 int vf, int rate)
1308{
1309 struct be_adapter *adapter = netdev_priv(netdev);
1310 int status = 0;
1311
11ac75ed 1312 if (!sriov_enabled(adapter))
e1d18735
AK
1313 return -EPERM;
1314
94f434c2 1315 if (vf >= adapter->num_vfs)
e1d18735
AK
1316 return -EINVAL;
1317
94f434c2
AK
1318 if (rate < 100 || rate > 10000) {
1319 dev_err(&adapter->pdev->dev,
1320 "tx rate must be between 100 and 10000 Mbps\n");
1321 return -EINVAL;
1322 }
e1d18735 1323
d5c18473
PR
1324 if (lancer_chip(adapter))
1325 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1326 else
1327 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1328
1329 if (status)
94f434c2 1330 dev_err(&adapter->pdev->dev,
e1d18735 1331 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1332 else
1333 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1334 return status;
1335}
1336
2632bafd
SP
1337static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1338 ulong now)
6b7c5b94 1339{
2632bafd
SP
1340 aic->rx_pkts_prev = rx_pkts;
1341 aic->tx_reqs_prev = tx_pkts;
1342 aic->jiffies = now;
1343}
ac124ff9 1344
2632bafd
SP
1345static void be_eqd_update(struct be_adapter *adapter)
1346{
1347 struct be_set_eqd set_eqd[MAX_EVT_QS];
1348 int eqd, i, num = 0, start;
1349 struct be_aic_obj *aic;
1350 struct be_eq_obj *eqo;
1351 struct be_rx_obj *rxo;
1352 struct be_tx_obj *txo;
1353 u64 rx_pkts, tx_pkts;
1354 ulong now;
1355 u32 pps, delta;
10ef9ab4 1356
2632bafd
SP
1357 for_all_evt_queues(adapter, eqo, i) {
1358 aic = &adapter->aic_obj[eqo->idx];
1359 if (!aic->enable) {
1360 if (aic->jiffies)
1361 aic->jiffies = 0;
1362 eqd = aic->et_eqd;
1363 goto modify_eqd;
1364 }
6b7c5b94 1365
2632bafd
SP
1366 rxo = &adapter->rx_obj[eqo->idx];
1367 do {
1368 start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1369 rx_pkts = rxo->stats.rx_pkts;
1370 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
10ef9ab4 1371
2632bafd
SP
1372 txo = &adapter->tx_obj[eqo->idx];
1373 do {
1374 start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1375 tx_pkts = txo->stats.tx_reqs;
1376 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
6b7c5b94 1377
6b7c5b94 1378
2632bafd
SP
1379 /* Skip, if wrapped around or first calculation */
1380 now = jiffies;
1381 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1382 rx_pkts < aic->rx_pkts_prev ||
1383 tx_pkts < aic->tx_reqs_prev) {
1384 be_aic_update(aic, rx_pkts, tx_pkts, now);
1385 continue;
1386 }
1387
1388 delta = jiffies_to_msecs(now - aic->jiffies);
1389 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1390 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1391 eqd = (pps / 15000) << 2;
10ef9ab4 1392
2632bafd
SP
1393 if (eqd < 8)
1394 eqd = 0;
1395 eqd = min_t(u32, eqd, aic->max_eqd);
1396 eqd = max_t(u32, eqd, aic->min_eqd);
1397
1398 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1399modify_eqd:
2632bafd
SP
1400 if (eqd != aic->prev_eqd) {
1401 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1402 set_eqd[num].eq_id = eqo->q.id;
1403 aic->prev_eqd = eqd;
1404 num++;
1405 }
ac124ff9 1406 }
2632bafd
SP
1407
1408 if (num)
1409 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1410}
1411
3abcdeda 1412static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1413 struct be_rx_compl_info *rxcp)
4097f663 1414{
ac124ff9 1415 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1416
ab1594e9 1417 u64_stats_update_begin(&stats->sync);
3abcdeda 1418 stats->rx_compl++;
2e588f84 1419 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1420 stats->rx_pkts++;
2e588f84 1421 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1422 stats->rx_mcast_pkts++;
2e588f84 1423 if (rxcp->err)
ac124ff9 1424 stats->rx_compl_err++;
ab1594e9 1425 u64_stats_update_end(&stats->sync);
4097f663
SP
1426}
1427
2e588f84 1428static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1429{
19fad86f
PR
1430 /* L4 checksum is not reliable for non TCP/UDP packets.
1431 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1432 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1433 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1434}
1435
10ef9ab4
SP
/* Fetch the page_info of the RX frag at @frag_idx and account its
 * consumption; unmaps the backing page from the device when this frag
 * is the page's last user.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		/* last frag carved from this page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
1456
1457/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1458static void be_rx_compl_discard(struct be_rx_obj *rxo,
1459 struct be_rx_compl_info *rxcp)
6b7c5b94 1460{
3abcdeda 1461 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1462 struct be_rx_page_info *page_info;
2e588f84 1463 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1464
e80d9da6 1465 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1466 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1467 put_page(page_info->page);
1468 memset(page_info, 0, sizeof(*page_info));
2e588f84 1469 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1470 }
1471}
1472
1473/*
1474 * skb_fill_rx_data forms a complete skb for an ether frame
1475 * indicated by rxcp.
1476 */
10ef9ab4
SP
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the ethernet header linearly; attach the rest
		 * of the first frag as a page fragment
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1549
5be93b9a 1550/* Process the RX completion indicated by rxcp when GRO is disabled */
10ef9ab4
SP
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb: the completion's RX frags must still be released */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1583
5be93b9a 1584/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no skb: the completion's RX frags must still be released */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* attach every frag of the completion to the skb as page frags */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1640
10ef9ab4
SP
1641static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1642 struct be_rx_compl_info *rxcp)
2e588f84
SP
1643{
1644 rxcp->pkt_size =
1645 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1646 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1647 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1648 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1649 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1650 rxcp->ip_csum =
1651 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1652 rxcp->l4_csum =
1653 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1654 rxcp->ipv6 =
1655 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1656 rxcp->rxq_idx =
1657 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1658 rxcp->num_rcvd =
1659 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1660 rxcp->pkt_type =
1661 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1662 rxcp->rss_hash =
c297977e 1663 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1664 if (rxcp->vlanf) {
1665 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1666 compl);
1667 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1668 compl);
15d72184 1669 }
12004ae9 1670 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1671}
1672
10ef9ab4
SP
1673static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1674 struct be_rx_compl_info *rxcp)
2e588f84
SP
1675{
1676 rxcp->pkt_size =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1678 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1679 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1680 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1681 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1682 rxcp->ip_csum =
1683 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1684 rxcp->l4_csum =
1685 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1686 rxcp->ipv6 =
1687 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1688 rxcp->rxq_idx =
1689 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1690 rxcp->num_rcvd =
1691 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1692 rxcp->pkt_type =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1694 rxcp->rss_hash =
c297977e 1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1696 if (rxcp->vlanf) {
1697 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1698 compl);
1699 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1700 compl);
15d72184 1701 }
12004ae9 1702 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1703 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1704 ip_frag, compl);
2e588f84
SP
1705}
1706
/* Pop the next valid RX completion from rxo's CQ, parse it into
 * rxo->rxcp and apply quirk fixups. Returns NULL when the CQ is empty.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* order: read the rest of the entry only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* the L4 checksum is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* strip the tag when it matches the port vid and the vid is
		 * not configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1749
1829b086 1750static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1751{
6b7c5b94 1752 u32 order = get_order(size);
1829b086 1753
6b7c5b94 1754 if (order > 0)
1829b086
ED
1755 gfp |= __GFP_COMP;
1756 return alloc_pages(gfp, order);
6b7c5b94
SP
1757}
1758
1759/*
1760 * Allocate a page, split it to fragments of size rx_frag_size and post as
1761 * receive buffers to BE
1762 */
1829b086 1763static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1764{
3abcdeda 1765 struct be_adapter *adapter = rxo->adapter;
26d92f92 1766 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1767 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1768 struct page *pagep = NULL;
1769 struct be_eth_rx_d *rxd;
1770 u64 page_dmaaddr = 0, frag_dmaaddr;
1771 u32 posted, page_offset = 0;
1772
3abcdeda 1773 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1774 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1775 if (!pagep) {
1829b086 1776 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1777 if (unlikely(!pagep)) {
ac124ff9 1778 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1779 break;
1780 }
2b7bcebf
IV
1781 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1782 0, adapter->big_page_size,
1783 DMA_FROM_DEVICE);
6b7c5b94
SP
1784 page_info->page_offset = 0;
1785 } else {
1786 get_page(pagep);
1787 page_info->page_offset = page_offset + rx_frag_size;
1788 }
1789 page_offset = page_info->page_offset;
1790 page_info->page = pagep;
fac6da5b 1791 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1792 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1793
1794 rxd = queue_head_node(rxq);
1795 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1796 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1797
1798 /* Any space left in the current big page for another frag? */
1799 if ((page_offset + rx_frag_size + rx_frag_size) >
1800 adapter->big_page_size) {
1801 pagep = NULL;
1802 page_info->last_page_user = true;
1803 }
26d92f92
SP
1804
1805 prev_page_info = page_info;
1806 queue_head_inc(rxq);
10ef9ab4 1807 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1808 }
1809 if (pagep)
26d92f92 1810 prev_page_info->last_page_user = true;
6b7c5b94
SP
1811
1812 if (posted) {
6b7c5b94 1813 atomic_add(posted, &rxq->used);
8788fdc2 1814 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1815 } else if (atomic_read(&rxq->used) == 0) {
1816 /* Let be_worker replenish when memory is available */
3abcdeda 1817 rxo->rx_post_starved = true;
6b7c5b94 1818 }
6b7c5b94
SP
1819}
1820
5fb379ee 1821static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1822{
6b7c5b94
SP
1823 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1824
1825 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1826 return NULL;
1827
f3eb62d2 1828 rmb();
6b7c5b94
SP
1829 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1830
1831 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1832
1833 queue_tail_inc(tx_cq);
1834 return txcp;
1835}
1836
/* Unmap and free the skb whose wrbs end at @last_index, advancing the
 * txq tail past all of them. Returns the number of wrbs consumed
 * (including the header wrb).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb is stashed at the tail slot when it is posted */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may carry the linear header area */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
1868
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the read of eqe->evt above against the clear below */
		rmb();
		/* Clear the entry so it is seen as consumed on the next pass */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1888
10ef9ab4
SP
1889/* Leaves the EQ is disarmed state */
1890static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1891{
10ef9ab4 1892 int num = events_get(eqo);
859b1e4e 1893
10ef9ab4 1894 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1895}
1896
10ef9ab4 1897static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1898{
1899 struct be_rx_page_info *page_info;
3abcdeda
SP
1900 struct be_queue_info *rxq = &rxo->q;
1901 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1902 struct be_rx_compl_info *rxcp;
d23e946c
SP
1903 struct be_adapter *adapter = rxo->adapter;
1904 int flush_wait = 0;
6b7c5b94
SP
1905 u16 tail;
1906
d23e946c
SP
1907 /* Consume pending rx completions.
1908 * Wait for the flush completion (identified by zero num_rcvd)
1909 * to arrive. Notify CQ even when there are no more CQ entries
1910 * for HW to flush partially coalesced CQ entries.
1911 * In Lancer, there is no need to wait for flush compl.
1912 */
1913 for (;;) {
1914 rxcp = be_rx_compl_get(rxo);
1915 if (rxcp == NULL) {
1916 if (lancer_chip(adapter))
1917 break;
1918
1919 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1920 dev_warn(&adapter->pdev->dev,
1921 "did not receive flush compl\n");
1922 break;
1923 }
1924 be_cq_notify(adapter, rx_cq->id, true, 0);
1925 mdelay(1);
1926 } else {
1927 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1928 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1929 if (rxcp->num_rcvd == 0)
1930 break;
1931 }
6b7c5b94
SP
1932 }
1933
d23e946c
SP
1934 /* After cleanup, leave the CQ in unarmed state */
1935 be_cq_notify(adapter, rx_cq->id, false, 0);
1936
1937 /* Then free posted rx buffers that were not used */
6b7c5b94 1938 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1939 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1940 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1941 put_page(page_info->page);
1942 memset(page_info, 0, sizeof(*page_info));
1943 }
1944 BUG_ON(atomic_read(&rxq->used));
482c9e79 1945 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1946}
1947
0ae57bb3 1948static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1949{
0ae57bb3
SP
1950 struct be_tx_obj *txo;
1951 struct be_queue_info *txq;
a8e9179a 1952 struct be_eth_tx_compl *txcp;
4d586b82 1953 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1954 struct sk_buff *sent_skb;
1955 bool dummy_wrb;
0ae57bb3 1956 int i, pending_txqs;
a8e9179a
SP
1957
1958 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1959 do {
0ae57bb3
SP
1960 pending_txqs = adapter->num_tx_qs;
1961
1962 for_all_tx_queues(adapter, txo, i) {
1963 txq = &txo->q;
1964 while ((txcp = be_tx_compl_get(&txo->cq))) {
1965 end_idx =
1966 AMAP_GET_BITS(struct amap_eth_tx_compl,
1967 wrb_index, txcp);
1968 num_wrbs += be_tx_compl_process(adapter, txo,
1969 end_idx);
1970 cmpl++;
1971 }
1972 if (cmpl) {
1973 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1974 atomic_sub(num_wrbs, &txq->used);
1975 cmpl = 0;
1976 num_wrbs = 0;
1977 }
1978 if (atomic_read(&txq->used) == 0)
1979 pending_txqs--;
a8e9179a
SP
1980 }
1981
0ae57bb3 1982 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1983 break;
1984
1985 mdelay(1);
1986 } while (true);
1987
0ae57bb3
SP
1988 for_all_tx_queues(adapter, txo, i) {
1989 txq = &txo->q;
1990 if (atomic_read(&txq->used))
1991 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1992 atomic_read(&txq->used));
1993
1994 /* free posted tx for which compls will never arrive */
1995 while (atomic_read(&txq->used)) {
1996 sent_skb = txo->sent_skb_list[txq->tail];
1997 end_idx = txq->tail;
1998 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1999 &dummy_wrb);
2000 index_adv(&end_idx, num_wrbs - 1, txq->len);
2001 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2002 atomic_sub(num_wrbs, &txq->used);
2003 }
b03388d6 2004 }
6b7c5b94
SP
2005}
2006
10ef9ab4
SP
2007static void be_evt_queues_destroy(struct be_adapter *adapter)
2008{
2009 struct be_eq_obj *eqo;
2010 int i;
2011
2012 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2013 if (eqo->q.created) {
2014 be_eq_clean(eqo);
10ef9ab4 2015 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 2016 netif_napi_del(&eqo->napi);
19d59aa7 2017 }
10ef9ab4
SP
2018 be_queue_free(adapter, &eqo->q);
2019 }
2020}
2021
/* Create the event queues (one per interrupt vector, capped by the
 * configured queue count), register a NAPI context for each and
 * initialize its adaptive interrupt coalescing state.
 * Returns 0 or a negative errno; partially created queues are cleaned
 * up by be_evt_queues_destroy() on the caller's error path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2054
5fb379ee
SP
2055static void be_mcc_queues_destroy(struct be_adapter *adapter)
2056{
2057 struct be_queue_info *q;
5fb379ee 2058
8788fdc2 2059 q = &adapter->mcc_obj.q;
5fb379ee 2060 if (q->created)
8788fdc2 2061 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2062 be_queue_free(adapter, q);
2063
8788fdc2 2064 q = &adapter->mcc_obj.cq;
5fb379ee 2065 if (q->created)
8788fdc2 2066 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2067 be_queue_free(adapter, q);
2068}
2069
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and WRB queue, unwinding in reverse
 * order via the goto chain on any failure. Returns 0 or -1.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2102
6b7c5b94
SP
2103static void be_tx_queues_destroy(struct be_adapter *adapter)
2104{
2105 struct be_queue_info *q;
3c8def97
SP
2106 struct be_tx_obj *txo;
2107 u8 i;
6b7c5b94 2108
3c8def97
SP
2109 for_all_tx_queues(adapter, txo, i) {
2110 q = &txo->q;
2111 if (q->created)
2112 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2113 be_queue_free(adapter, q);
6b7c5b94 2114
3c8def97
SP
2115 q = &txo->cq;
2116 if (q->created)
2117 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2118 be_queue_free(adapter, q);
2119 }
6b7c5b94
SP
2120}
2121
7707133c 2122static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2123{
10ef9ab4 2124 struct be_queue_info *cq, *eq;
3c8def97 2125 struct be_tx_obj *txo;
92bf14ab 2126 int status, i;
6b7c5b94 2127
92bf14ab 2128 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2129
10ef9ab4
SP
2130 for_all_tx_queues(adapter, txo, i) {
2131 cq = &txo->cq;
2132 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2133 sizeof(struct be_eth_tx_compl));
2134 if (status)
2135 return status;
3c8def97 2136
10ef9ab4
SP
2137 /* If num_evt_qs is less than num_tx_qs, then more than
2138 * one txq share an eq
2139 */
2140 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2141 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2142 if (status)
2143 return status;
6b7c5b94 2144
10ef9ab4
SP
2145 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2146 sizeof(struct be_eth_wrb));
2147 if (status)
2148 return status;
6b7c5b94 2149
94d73aaa 2150 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2151 if (status)
2152 return status;
3c8def97 2153 }
6b7c5b94 2154
d379142b
SP
2155 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2156 adapter->num_tx_qs);
10ef9ab4 2157 return 0;
6b7c5b94
SP
2158}
2159
10ef9ab4 2160static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2161{
2162 struct be_queue_info *q;
3abcdeda
SP
2163 struct be_rx_obj *rxo;
2164 int i;
2165
2166 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2167 q = &rxo->cq;
2168 if (q->created)
2169 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2170 be_queue_free(adapter, q);
ac6a0c4a
SP
2171 }
2172}
2173
10ef9ab4 2174static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2175{
10ef9ab4 2176 struct be_queue_info *eq, *cq;
3abcdeda
SP
2177 struct be_rx_obj *rxo;
2178 int rc, i;
6b7c5b94 2179
92bf14ab
SP
2180 /* We can create as many RSS rings as there are EQs. */
2181 adapter->num_rx_qs = adapter->num_evt_qs;
2182
2183 /* We'll use RSS only if atleast 2 RSS rings are supported.
2184 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2185 */
92bf14ab
SP
2186 if (adapter->num_rx_qs > 1)
2187 adapter->num_rx_qs++;
2188
6b7c5b94 2189 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2190 for_all_rx_queues(adapter, rxo, i) {
2191 rxo->adapter = adapter;
3abcdeda
SP
2192 cq = &rxo->cq;
2193 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2194 sizeof(struct be_eth_rx_compl));
2195 if (rc)
10ef9ab4 2196 return rc;
3abcdeda 2197
10ef9ab4
SP
2198 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2199 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2200 if (rc)
10ef9ab4 2201 return rc;
3abcdeda 2202 }
6b7c5b94 2203
d379142b
SP
2204 dev_info(&adapter->pdev->dev,
2205 "created %d RSS queue(s) and 1 default RX queue\n",
2206 adapter->num_rx_qs - 1);
10ef9ab4 2207 return 0;
b628bde2
SP
2208}
2209
/* Legacy INTx interrupt handler: count pending events, hand processing
 * to NAPI, and track spurious interrupts so the kernel does not disable
 * the shared IRQ line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2241
10ef9ab4 2242static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2243{
10ef9ab4 2244 struct be_eq_obj *eqo = dev;
6b7c5b94 2245
0b545a62
SP
2246 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2247 napi_schedule(&eqo->napi);
6b7c5b94
SP
2248 return IRQ_HANDLED;
2249}
2250
2e588f84 2251static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2252{
e38b1706 2253 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2254}
2255
/* NAPI RX worker for one ring: consume up to @budget completions,
 * deliver frames (via GRO where possible), then ack+re-arm the CQ and
 * replenish rx buffers when the ring runs low. Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Replenish posted buffers when below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2305
/* NAPI TX worker for one ring: reap up to @budget completions, ack and
 * re-arm the CQ, and wake the netdev subqueue if it was flow-stopped.
 * Returns true when the budget was not exhausted (ring fully drained).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2338
68d7bdcb 2339int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2340{
2341 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2342 struct be_adapter *adapter = eqo->adapter;
0b545a62 2343 int max_work = 0, work, i, num_evts;
10ef9ab4 2344 bool tx_done;
f31e50a8 2345
0b545a62
SP
2346 num_evts = events_get(eqo);
2347
10ef9ab4
SP
2348 /* Process all TXQs serviced by this EQ */
2349 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2350 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2351 eqo->tx_budget, i);
2352 if (!tx_done)
2353 max_work = budget;
f31e50a8
SP
2354 }
2355
10ef9ab4
SP
2356 /* This loop will iterate twice for EQ0 in which
2357 * completions of the last RXQ (default one) are also processed
2358 * For other EQs the loop iterates only once
2359 */
2360 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2361 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2362 max_work = max(work, max_work);
2363 }
6b7c5b94 2364
10ef9ab4
SP
2365 if (is_mcc_eqo(eqo))
2366 be_process_mcc(adapter);
93c86700 2367
10ef9ab4
SP
2368 if (max_work < budget) {
2369 napi_complete(napi);
0b545a62 2370 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2371 } else {
2372 /* As we'll continue in polling mode, count and clear events */
0b545a62 2373 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2374 }
10ef9ab4 2375 return max_work;
6b7c5b94
SP
2376}
2377
f67ef7ba 2378void be_detect_error(struct be_adapter *adapter)
7c185276 2379{
e1cfb67a
PR
2380 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2381 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2382 u32 i;
2383
d23e946c 2384 if (be_hw_error(adapter))
72f02485
SP
2385 return;
2386
e1cfb67a
PR
2387 if (lancer_chip(adapter)) {
2388 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2389 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2390 sliport_err1 = ioread32(adapter->db +
2391 SLIPORT_ERROR1_OFFSET);
2392 sliport_err2 = ioread32(adapter->db +
2393 SLIPORT_ERROR2_OFFSET);
2394 }
2395 } else {
2396 pci_read_config_dword(adapter->pdev,
2397 PCICFG_UE_STATUS_LOW, &ue_lo);
2398 pci_read_config_dword(adapter->pdev,
2399 PCICFG_UE_STATUS_HIGH, &ue_hi);
2400 pci_read_config_dword(adapter->pdev,
2401 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2402 pci_read_config_dword(adapter->pdev,
2403 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2404
f67ef7ba
PR
2405 ue_lo = (ue_lo & ~ue_lo_mask);
2406 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2407 }
7c185276 2408
1451ae6e
AK
2409 /* On certain platforms BE hardware can indicate spurious UEs.
2410 * Allow the h/w to stop working completely in case of a real UE.
2411 * Hence not setting the hw_error for UE detection.
2412 */
2413 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2414 adapter->hw_error = true;
434b3648 2415 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2416 "Error detected in the card\n");
2417 }
2418
2419 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2420 dev_err(&adapter->pdev->dev,
2421 "ERR: sliport status 0x%x\n", sliport_status);
2422 dev_err(&adapter->pdev->dev,
2423 "ERR: sliport error1 0x%x\n", sliport_err1);
2424 dev_err(&adapter->pdev->dev,
2425 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2426 }
2427
e1cfb67a
PR
2428 if (ue_lo) {
2429 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2430 if (ue_lo & 1)
7c185276
AK
2431 dev_err(&adapter->pdev->dev,
2432 "UE: %s bit set\n", ue_status_low_desc[i]);
2433 }
2434 }
f67ef7ba 2435
e1cfb67a
PR
2436 if (ue_hi) {
2437 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2438 if (ue_hi & 1)
7c185276
AK
2439 dev_err(&adapter->pdev->dev,
2440 "UE: %s bit set\n", ue_status_hi_desc[i]);
2441 }
2442 }
2443
2444}
2445
8d56ff11
SP
2446static void be_msix_disable(struct be_adapter *adapter)
2447{
ac6a0c4a 2448 if (msix_enabled(adapter)) {
8d56ff11 2449 pci_disable_msix(adapter->pdev);
ac6a0c4a 2450 adapter->num_msix_vec = 0;
68d7bdcb 2451 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2452 }
2453}
2454
c2bba3df 2455static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2456{
92bf14ab 2457 int i, status, num_vec;
d379142b 2458 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2459
92bf14ab
SP
2460 /* If RoCE is supported, program the max number of NIC vectors that
2461 * may be configured via set-channels, along with vectors needed for
2462 * RoCe. Else, just program the number we'll use initially.
2463 */
2464 if (be_roce_supported(adapter))
2465 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2466 2 * num_online_cpus());
2467 else
2468 num_vec = adapter->cfg_num_qs;
3abcdeda 2469
ac6a0c4a 2470 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2471 adapter->msix_entries[i].entry = i;
2472
ac6a0c4a 2473 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2474 if (status == 0) {
2475 goto done;
92bf14ab 2476 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2477 num_vec = status;
c2bba3df
SK
2478 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2479 num_vec);
2480 if (!status)
3abcdeda 2481 goto done;
3abcdeda 2482 }
d379142b
SP
2483
2484 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2485
c2bba3df
SK
2486 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2487 if (!be_physfn(adapter))
2488 return status;
2489 return 0;
3abcdeda 2490done:
92bf14ab
SP
2491 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2492 adapter->num_msix_roce_vec = num_vec / 2;
2493 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2494 adapter->num_msix_roce_vec);
2495 }
2496
2497 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2498
2499 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2500 adapter->num_msix_vec);
c2bba3df 2501 return 0;
6b7c5b94
SP
2502}
2503
fe6d2a38 2504static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2505 struct be_eq_obj *eqo)
b628bde2 2506{
f2f781a7 2507 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2508}
6b7c5b94 2509
/* Request one MSI-X IRQ per event queue. On failure, free the IRQs
 * already requested (walking backwards), disable MSI-X and return the
 * request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2533
/* Register interrupt handlers: MSI-X when enabled, otherwise fall back
 * to shared INTx on the PF (VFs must have MSI-X). Sets isr_registered
 * on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2561
2562static void be_irq_unregister(struct be_adapter *adapter)
2563{
2564 struct net_device *netdev = adapter->netdev;
10ef9ab4 2565 struct be_eq_obj *eqo;
3abcdeda 2566 int i;
6b7c5b94
SP
2567
2568 if (!adapter->isr_registered)
2569 return;
2570
2571 /* INTx */
ac6a0c4a 2572 if (!msix_enabled(adapter)) {
e49cc34f 2573 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2574 goto done;
2575 }
2576
2577 /* MSIx */
10ef9ab4
SP
2578 for_all_evt_queues(adapter, eqo, i)
2579 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2580
6b7c5b94
SP
2581done:
2582 adapter->isr_registered = false;
6b7c5b94
SP
2583}
2584
10ef9ab4 2585static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2586{
2587 struct be_queue_info *q;
2588 struct be_rx_obj *rxo;
2589 int i;
2590
2591 for_all_rx_queues(adapter, rxo, i) {
2592 q = &rxo->q;
2593 if (q->created) {
2594 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2595 be_rx_cq_clean(rxo);
482c9e79 2596 }
10ef9ab4 2597 be_queue_free(adapter, q);
482c9e79
SP
2598 }
2599}
2600
/* ndo_stop: quiesce NAPI, drain TX, tear down RX queues, drain EQs and
 * release IRQs. The ordering here is load-bearing: NAPI must stop
 * before queues are destroyed and IRQs are freed last.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	/* Only disable NAPI if be_open() actually enabled it */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Ensure no handler is still running before draining the EQ */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2637
10ef9ab4 2638static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2639{
2640 struct be_rx_obj *rxo;
e9008ee9
PR
2641 int rc, i, j;
2642 u8 rsstable[128];
482c9e79
SP
2643
2644 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2645 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2646 sizeof(struct be_eth_rx_d));
2647 if (rc)
2648 return rc;
2649 }
2650
2651 /* The FW would like the default RXQ to be created first */
2652 rxo = default_rxo(adapter);
2653 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2654 adapter->if_handle, false, &rxo->rss_id);
2655 if (rc)
2656 return rc;
2657
2658 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2659 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2660 rx_frag_size, adapter->if_handle,
2661 true, &rxo->rss_id);
482c9e79
SP
2662 if (rc)
2663 return rc;
2664 }
2665
2666 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2667 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2668 for_all_rss_queues(adapter, rxo, i) {
2669 if ((j + i) >= 128)
2670 break;
2671 rsstable[j + i] = rxo->rss_id;
2672 }
2673 }
594ad54a
SR
2674 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2675 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2676
2677 if (!BEx_chip(adapter))
2678 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2679 RSS_ENABLE_UDP_IPV6;
2680
2681 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2682 128);
2683 if (rc) {
2684 adapter->rss_flags = 0;
482c9e79 2685 return rc;
594ad54a 2686 }
482c9e79
SP
2687 }
2688
2689 /* First time posting */
10ef9ab4 2690 for_all_rx_queues(adapter, rxo, i)
482c9e79 2691 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2692 return 0;
2693}
2694
/* ndo_open: create RX queues, register IRQs, arm all CQs/EQs, enable
 * NAPI and start the tx queues. Any failure unwinds through be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all completion queues before enabling NAPI */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report current link state; best-effort, failure is ignored */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2737
/* Enable or disable Wake-on-LAN (magic packet). Enabling programs the
 * PM control register and the netdev MAC as the wake filter; disabling
 * programs a zero MAC and clears the PCI wake states.
 * Returns 0 on success, -1 on DMA allocation failure, or a command/PCI
 * error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* A zero MAC disables the magic-packet filter */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2775
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx adds a pmac filter on the VF interface; newer chips
		 * set the MAC directly on the VF's interface handle
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	return status;
}
2810
4c876616
SP
/* For each VF, fetch the MAC already provisioned in FW and cache it in
 * the VF's config entry. Used when VFs were enabled before this driver
 * loaded, so existing addresses are preserved instead of regenerated.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* retrieves the VF's pmac_id as a side effect */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2830
/* Tear down SR-IOV state: disable VFs in PCI (unless they are assigned
 * to guest VMs, in which case only the bookkeeping is freed), remove
 * each VF's MAC filter and FW interface, and free the VF config array.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		/* VFs in active use by VMs must not be yanked away */
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	/* num_vfs = 0 makes sriov_enabled() false for later teardown */
	adapter->num_vfs = 0;
}
2858
7707133c
SP
/* Destroy all HW queues in the reverse order of their creation in
 * be_setup_queues()
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
2866
68d7bdcb 2867static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2868{
191eb756
SP
2869 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2870 cancel_delayed_work_sync(&adapter->work);
2871 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2872 }
68d7bdcb
SP
2873}
2874
/* Undo be_setup(): stop the worker, tear down SR-IOV, delete all MAC
 * filters and the primary interface, destroy the queues and release
 * MSI-X vectors. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2900
/* Create a FW interface (if_handle) for each VF. On non-BE3 chips the
 * capability flags are taken from the FW profile when one exists;
 * otherwise a minimal untagged/broadcast/multicast set is used.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
2930
39f1d94d 2931static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2932{
11ac75ed 2933 struct be_vf_cfg *vf_cfg;
30128031
SP
2934 int vf;
2935
39f1d94d
SP
2936 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2937 GFP_KERNEL);
2938 if (!adapter->vf_cfg)
2939 return -ENOMEM;
2940
11ac75ed
SP
2941 for_all_vfs(adapter, vf_cfg, vf) {
2942 vf_cfg->if_handle = -1;
2943 vf_cfg->pmac_id = -1;
30128031 2944 }
39f1d94d 2945 return 0;
30128031
SP
2946}
2947
f9449ab7
SP
/* Enable SR-IOV and provision each VF: create (or query, when VFs were
 * already enabled before this driver loaded) per-VF interfaces and
 * MACs, grant MAC/VLAN filtering privilege, lift the BE3 default TX
 * rate cap, cache link speed and default vlan, and finally enable the
 * VFs in FW and PCI. On any failure the partial setup is undone via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled (e.g. by a previous driver instance);
		 * reuse them and ignore the module parameter
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		if (!old_vfs)
			be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3045
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits, so compute the
 * per-function resource limits locally from chip type, function mode
 * and SR-IOV intent.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* vlan table is shared in FLEX10/UMC modes */
	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
	else if (adapter->function_mode & UMC_ENABLED)
		res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RX queue (the default, non-RSS queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3093
30128031
SP
3094static void be_setup_init(struct be_adapter *adapter)
3095{
3096 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3097 adapter->phy.link_speed = -1;
30128031
SP
3098 adapter->if_handle = -1;
3099 adapter->be3_native = false;
3100 adapter->promiscuous = false;
f25b119c
PR
3101 if (be_physfn(adapter))
3102 adapter->cmd_privileges = MAX_PRIVILEGES;
3103 else
3104 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3105}
3106
/* Populate adapter->res with per-function resource limits:
 * computed locally for BE2/BE3, read from FW for Lancer/Skyhawk.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3150
39f1d94d
SP
/* Routine to query per function resource limits.
 * Also allocates the pmac_id table sized for the uc-mac limit and
 * clamps the configured queue count to what HW supports.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3178
95046b92
SP
/* Establish the primary MAC: fetch the permanent MAC from FW when the
 * netdev has none yet, otherwise re-program the existing dev_addr into
 * HW (it may have been reset).
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}
3204
68d7bdcb
SP
/* Start the periodic (1s) housekeeping worker and flag it as scheduled
 * so be_cancel_worker() knows to cancel it.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3210
/* Create all HW queues (EQs, TX queues, RX CQs, MCC queues) and tell
 * the stack how many RX/TX queues are actually in use. The caller must
 * hold rtnl_lock for the netif_set_real_num_*_queues() calls.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3245
68d7bdcb
SP
/* Re-create all queues after a ring-count change: close the device if
 * running, stop the worker, destroy the queues (re-programming MSI-X
 * only when no vectors are shared with RoCE), rebuild everything, then
 * restart the worker and re-open the device.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3281
7707133c
SP
/* Main (re)initialization path: query FW config and resource limits,
 * enable MSI-X, create the primary interface and all queues, program
 * MAC/VLAN/rx-mode/flow-control, bring up SR-IOV when requested, and
 * start the periodic worker. Any failure unwinds via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* enable only the flags actually supported by this interface */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* re-program flow control only when it differs from what FW has */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3359
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-mode "interrupt": raise each event queue's notification and
 * schedule its NAPI handler so the stack can make progress with normal
 * interrupts disabled (netconsole, kgdboe etc.).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* redundant trailing "return;" removed */
}
#endif
3375
84517482 3376#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3377static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3378
fa9a6fed 3379static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3380 const u8 *p, u32 img_start, int image_size,
3381 int hdr_size)
fa9a6fed
SB
3382{
3383 u32 crc_offset;
3384 u8 flashed_crc[4];
3385 int status;
3f0d4560
AK
3386
3387 crc_offset = hdr_size + img_start + image_size - 4;
3388
fa9a6fed 3389 p += crc_offset;
3f0d4560
AK
3390
3391 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3392 (image_size - 4));
fa9a6fed
SB
3393 if (status) {
3394 dev_err(&adapter->pdev->dev,
3395 "could not get crc from flash, not flashing redboot\n");
3396 return false;
3397 }
3398
3399 /*update redboot only if crc does not match*/
3400 if (!memcmp(flashed_crc, p, 4))
3401 return false;
3402 else
3403 return true;
fa9a6fed
SB
3404}
3405
306f1348
SP
/* PHY firmware is flashed only for TN_8022 PHYs on 10GBase-T ports */
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
3411
c165541e
PR
3412static bool is_comp_in_ufi(struct be_adapter *adapter,
3413 struct flash_section_info *fsec, int type)
3414{
3415 int i = 0, img_type = 0;
3416 struct flash_section_info_g2 *fsec_g2 = NULL;
3417
ca34fe38 3418 if (BE2_chip(adapter))
c165541e
PR
3419 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3420
3421 for (i = 0; i < MAX_FLASH_COMP; i++) {
3422 if (fsec_g2)
3423 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3424 else
3425 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3426
3427 if (img_type == type)
3428 return true;
3429 }
3430 return false;
3431
3432}
3433
4188e7df 3434static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
c165541e
PR
3435 int header_size,
3436 const struct firmware *fw)
3437{
3438 struct flash_section_info *fsec = NULL;
3439 const u8 *p = fw->data;
3440
3441 p += header_size;
3442 while (p < (fw->data + fw->size)) {
3443 fsec = (struct flash_section_info *)p;
3444 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3445 return fsec;
3446 p += 32;
3447 }
3448 return NULL;
3449}
3450
773a2d7c
PR
/* Write an image to flash in 32KB chunks through the FW mailbox.
 * Intermediate chunks use the SAVE op; the final chunk uses the FLASH
 * op which commits the image. PHY firmware has its own op codes, and an
 * ILLEGAL_IOCTL_REQ on PHY flashing is treated as "not applicable"
 * rather than a hard error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* last chunk commits; earlier chunks just stage data */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3491
/* For BE2, BE3 and BE3-R */
/* Flash every component found in the UFI onto a BE2/BE3 adapter.
 * Component layout tables differ between gen2 (BE2) and gen3 (BE3)
 * images; each table entry gives the flash offset, FW op-type, max
 * size and image type of one component.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs FW version >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* redboot is flashed only when its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* bounds check: component must lie within the UFI file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3601
773a2d7c
PR
3602static int be_flash_skyhawk(struct be_adapter *adapter,
3603 const struct firmware *fw,
3604 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3605{
773a2d7c
PR
3606 int status = 0, i, filehdr_size = 0;
3607 int img_offset, img_size, img_optype, redboot;
3608 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3609 const u8 *p = fw->data;
3610 struct flash_section_info *fsec = NULL;
3611
3612 filehdr_size = sizeof(struct flash_file_hdr_g3);
3613 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3614 if (!fsec) {
3615 dev_err(&adapter->pdev->dev,
3616 "Invalid Cookie. UFI corrupted ?\n");
3617 return -1;
3618 }
3619
3620 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3621 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3622 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3623
3624 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3625 case IMAGE_FIRMWARE_iSCSI:
3626 img_optype = OPTYPE_ISCSI_ACTIVE;
3627 break;
3628 case IMAGE_BOOT_CODE:
3629 img_optype = OPTYPE_REDBOOT;
3630 break;
3631 case IMAGE_OPTION_ROM_ISCSI:
3632 img_optype = OPTYPE_BIOS;
3633 break;
3634 case IMAGE_OPTION_ROM_PXE:
3635 img_optype = OPTYPE_PXE_BIOS;
3636 break;
3637 case IMAGE_OPTION_ROM_FCoE:
3638 img_optype = OPTYPE_FCOE_BIOS;
3639 break;
3640 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3641 img_optype = OPTYPE_ISCSI_BACKUP;
3642 break;
3643 case IMAGE_NCSI:
3644 img_optype = OPTYPE_NCSI_FW;
3645 break;
3646 default:
3647 continue;
3648 }
3649
3650 if (img_optype == OPTYPE_REDBOOT) {
3651 redboot = be_flash_redboot(adapter, fw->data,
3652 img_offset, img_size,
3653 filehdr_size + img_hdrs_size);
3654 if (!redboot)
3655 continue;
3656 }
3657
3658 p = fw->data;
3659 p += filehdr_size + img_offset + img_hdrs_size;
3660 if (p + img_size > fw->data + fw->size)
3661 return -1;
3662
3663 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3664 if (status) {
3665 dev_err(&adapter->pdev->dev,
3666 "Flashing section type %d failed.\n",
3667 fsec->fsec_entry[i].type);
3668 return status;
3669 }
3670 }
3671 return 0;
3f0d4560
AK
3672}
3673
485bf569
SN
/* Download firmware to a Lancer adapter via the WRITE_OBJECT command:
 * stream the image in 32KB chunks to the "/prg" object, then issue a
 * zero-length write at the final offset to commit it. Depending on the
 * FW's reported change_status, either trigger an in-band FW reset or
 * tell the user a reboot is needed for the new FW to take effect.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one DMA buffer holds the request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept fewer bytes than requested */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3769
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
/* Classify a UFI firmware file against the adapter it is about to be
 * flashed on. The first character of the file-header build string encodes
 * the target chip generation ('2'/'3'/'4'), and asic_type_rev
 * distinguishes BE3-R images (UFI_TYPE3R) from plain BE3 ones.
 *
 * Returns one of the UFI_TYPE* constants, or -1 when the image does not
 * match the chip (also logs an error in that case).
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3795
/* Flash a (non-Lancer) UFI firmware file.
 *
 * Determines the UFI flavour from the g3 file header, then walks the image
 * directory looking for image id 1 and dispatches to the chip-specific
 * flash routine (Skyhawk or BEx). UFI_TYPE2 files carry no image directory
 * entries of that kind and are flashed unconditionally afterwards.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -1 on an
 * incompatible image, or the flash routine's error code.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* Single WRITE_FLASHROM request buffer reused by the flash helpers */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* ufi_type may come back -1 here; handled after the loop below */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			/* NOTE(review): no default case — UFI_TYPE2 and -1
			 * are intentionally handled after the loop */
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3864
/* Entry point for firmware flashing (called from ethtool).
 *
 * Fetches @fw_file via request_firmware() and hands it to the Lancer or
 * BE/Skyhawk download path. On success, refreshes the cached FW version
 * strings. The interface must be up, since flashing uses the MCC queue.
 *
 * Returns 0 on success or a negative error code.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	/* fw is NULL when request_firmware() failed; release is a no-op */
	release_firmware(fw);
	return status;
}
3895
a77dcb8c
AK
3896static int be_ndo_bridge_setlink(struct net_device *dev,
3897 struct nlmsghdr *nlh)
3898{
3899 struct be_adapter *adapter = netdev_priv(dev);
3900 struct nlattr *attr, *br_spec;
3901 int rem;
3902 int status = 0;
3903 u16 mode = 0;
3904
3905 if (!sriov_enabled(adapter))
3906 return -EOPNOTSUPP;
3907
3908 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3909
3910 nla_for_each_nested(attr, br_spec, rem) {
3911 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3912 continue;
3913
3914 mode = nla_get_u16(attr);
3915 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3916 return -EINVAL;
3917
3918 status = be_cmd_set_hsw_config(adapter, 0, 0,
3919 adapter->if_handle,
3920 mode == BRIDGE_MODE_VEPA ?
3921 PORT_FWD_TYPE_VEPA :
3922 PORT_FWD_TYPE_VEB);
3923 if (status)
3924 goto err;
3925
3926 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3927 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3928
3929 return status;
3930 }
3931err:
3932 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
3933 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3934
3935 return status;
3936}
3937
/* ndo_bridge_getlink handler: report the current embedded-switch mode
 * (VEB/VEPA) via the default netlink dump helper. Returns 0 (nothing to
 * report) when SR-IOV is off or when the hsw-config query fails.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: ask the FW which mode is active */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
3963
/* net_device_ops vtable wired into every be2net netdev by be_netdev_init() */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
};
3985
/* One-time netdev setup before register_netdev(): advertise offload
 * features (checksum, TSO, VLAN tag handling, RX hashing on multi-queue),
 * set device flags, cap GSO size, and hook up the netdev/ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX strip/filter are always-on, hence not in hw_features */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* leave room for the Ethernet header within the 64KB GSO limit */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}
4012
4013static void be_unmap_pci_bars(struct be_adapter *adapter)
4014{
c5b3ad4c
SP
4015 if (adapter->csr)
4016 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4017 if (adapter->db)
ce66f781 4018 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4019}
4020
ce66f781
SP
4021static int db_bar(struct be_adapter *adapter)
4022{
4023 if (lancer_chip(adapter) || !be_physfn(adapter))
4024 return 0;
4025 else
4026 return 4;
4027}
4028
/* Record the RoCE doorbell window (first 4KB of the doorbell BAR) for the
 * RoCE driver; only Skyhawk has RoCE support. Always returns 0.
 */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
4040
/* Map the PCI BARs the driver uses: the CSR BAR (BAR 2, BEx PF only) and
 * the doorbell BAR (chip-dependent, see db_bar()). Also latches the SLI
 * interface type from config space and records the RoCE doorbell window.
 *
 * Returns 0 on success, -ENOMEM on any mapping failure (partially mapped
 * BARs are released).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* also unmaps the CSR BAR if it was mapped above */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4068
6b7c5b94
SP
/* Undo be_ctrl_init(): unmap BARs and free the mailbox and rx-filter DMA
 * buffers (each guarded since init may have failed part-way).
 */
static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
4084
6b7c5b94
SP
/* Bring up the command/control plane: read the SLI family and VF bit from
 * config space, map the PCI BARs, allocate the 16-byte-aligned mailbox and
 * the rx-filter DMA buffers, and initialize the mbox/MCC locks.
 *
 * Returns 0 on success or a negative error code; on failure all resources
 * acquired so far are released via the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* over-allocate by 16 bytes so the used region can be 16-aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* baseline state for pci_restore_state() in the EEH/resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4143
4144static void be_stats_cleanup(struct be_adapter *adapter)
4145{
3abcdeda 4146 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4147
4148 if (cmd->va)
2b7bcebf
IV
4149 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4150 cmd->va, cmd->dma);
6b7c5b94
SP
4151}
4152
4153static int be_stats_init(struct be_adapter *adapter)
4154{
3abcdeda 4155 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4156
ca34fe38
SP
4157 if (lancer_chip(adapter))
4158 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4159 else if (BE2_chip(adapter))
89a88ab8 4160 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4161 else if (BE3_chip(adapter))
ca34fe38 4162 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4163 else
4164 /* ALL non-BE ASICs */
4165 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4166
ede23fa8
JP
4167 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4168 GFP_KERNEL);
6b7c5b94
SP
4169 if (cmd->va == NULL)
4170 return -1;
4171 return 0;
4172}
4173
/* PCI remove callback: tear down in reverse order of be_probe() — detach
 * RoCE, mask interrupts, stop the recovery worker, unregister the netdev,
 * release rings/FW resources, then free DMA buffers, BAR mappings and the
 * PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4205
4762f6ce
AK
4206bool be_is_wol_supported(struct be_adapter *adapter)
4207{
4208 return ((adapter->wol_cap & BE_WOL_CAP) &&
4209 !be_is_wol_excluded(adapter)) ? true : false;
4210}
4211
941a77d5
SK
/* Query the FW's UART trace level via the extended-FAT capabilities
 * command. Returns the last MODE_UART dbg level found in module[0], or 0
 * on Lancer (not supported there) or on any failure.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
abb93951 4248
/* Gather one-time configuration from the FW at probe: controller
 * attributes, WOL capability (with exclusion-list fallback), temperature
 * polling period, FW log level, and the default queue count.
 *
 * Returns 0 on success or the error from the attributes query.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4278
/* Recover a Lancer function after a detected HW error: wait for the chip
 * to report ready, then do a full close/clear, clear the error state, and
 * re-run setup/open. Returns 0 on success; -EAGAIN specifically means the
 * FW is still provisioning resources and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* NOTE(review): success is logged with dev_err — presumably so it
	 * always reaches the log at the same level as the error messages */
	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}
4315
/* Periodic (1s) error-detection worker. On Lancer, a detected HW error
 * triggers a function recovery with the netdev detached; the work
 * reschedules itself unless recovery failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4342
/* Periodic (1s) housekeeping worker: reap MCC completions while the
 * interface is down; otherwise kick off a stats refresh, poll the die
 * temperature on the PF every be_get_temp_freq ticks, replenish starved
 * RX queues, and update EQ delay. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* stats_cmd_sent acts as "a stats request is already in flight" */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4384
257a3feb 4385/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4386static bool be_reset_required(struct be_adapter *adapter)
4387{
257a3feb 4388 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4389}
4390
d379142b
SP
4391static char *mc_name(struct be_adapter *adapter)
4392{
4393 if (adapter->function_mode & FLEX10_MODE)
4394 return "FLEX10";
4395 else if (adapter->function_mode & VNIC_MODE)
4396 return "vNIC";
4397 else if (adapter->function_mode & UMC_ENABLED)
4398 return "UMC";
4399 else
4400 return "";
4401}
4402
/* "PF" or "VF", for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4407
/* PCI probe callback: full device bring-up.
 *
 * Order: enable PCI device and regions -> allocate multiqueue netdev ->
 * configure DMA masks (64-bit with 32-bit fallback) -> map BARs and init
 * the control plane -> sync with FW ready state (PF only) and FLR if no
 * VFs are active -> init stats and initial config -> be_setup() ->
 * register netdev -> attach RoCE and start the recovery worker.
 * Every failure unwinds through the goto ladder in reverse order.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit masks if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; failure is informational only */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	/* best-effort: port_name stays unset on failure — used only for
	 * the banner below */
	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4536
/* PM suspend callback: arm WOL if enabled, stop the recovery worker,
 * detach and close the netdev, release rings/FW resources, then put the
 * PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4560
/* PM resume callback: re-enable the PCI device, wait for FW readiness,
 * re-init the command interface, rebuild rings via be_setup(), reopen the
 * interface if it was running, restart the recovery worker, and disarm
 * WOL. Returns 0 on success or a negative error code.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is ignored here, unlike in
	 * be_probe()/be_eeh_resume() — confirm this is intentional */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4601
82456b03
SP
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* quiesce both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4621
cf588477
SP
/* EEH error_detected callback: on the first report, stop recovery work,
 * detach/close the netdev and release resources; then tell the EEH core
 * whether a slot reset can be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* eeh_error guards against doing the teardown twice if the
	 * callback fires again before recovery completes */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4660
/* EEH slot_reset callback: re-enable the device, restore its config
 * space, wait for the FW to become ready again, then clear AER and driver
 * error state. Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4687
/* EEH resume callback: after a successful slot reset, FLR the function,
 * re-init the command interface, rebuild via be_setup(), reopen the
 * interface if it was running, and restart the recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* refresh the saved config space for any future restore */
	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4724
/* PCI/EEH error recovery callbacks (detect -> slot reset -> resume) */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4730
6b7c5b94
SP
/* PCI driver descriptor registered at module init */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4741
4742static int __init be_init_module(void)
4743{
8e95a202
JP
4744 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4745 rx_frag_size != 2048) {
6b7c5b94
SP
4746 printk(KERN_WARNING DRV_NAME
4747 " : Module param rx_frag_size must be 2048/4096/8192."
4748 " Using 2048\n");
4749 rx_frag_size = 2048;
4750 }
6b7c5b94
SP
4751
4752 return pci_register_driver(&be_driver);
4753}
4754module_init(be_init_module);
4755
/* Module exit: unregister the PCI driver; per-device teardown runs via
 * be_remove(). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
4760module_exit(be_exit_module);