]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
bnx2x: fix typos in "configure"
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
9baa3c34 42static const struct pci_device_id be_dev_ids[] = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 54/* UE Status Low CSR */
42c8b11e 55static const char * const ue_status_low_desc[] = {
7c185276
AK
56 "CEV",
57 "CTX",
58 "DBUF",
59 "ERX",
60 "Host",
61 "MPU",
62 "NDMA",
63 "PTC ",
64 "RDMA ",
65 "RXF ",
66 "RXIPS ",
67 "RXULP0 ",
68 "RXULP1 ",
69 "RXULP2 ",
70 "TIM ",
71 "TPOST ",
72 "TPRE ",
73 "TXIPS ",
74 "TXULP0 ",
75 "TXULP1 ",
76 "UC ",
77 "WDMA ",
78 "TXULP2 ",
79 "HOST1 ",
80 "P0_OB_LINK ",
81 "P1_OB_LINK ",
82 "HOST_GPIO ",
83 "MBOX ",
6bdf8f55
VV
84 "ERX2 ",
85 "SPARE ",
86 "JTAG ",
87 "MPU_INTPEND "
7c185276 88};
e2fb1afa 89
7c185276 90/* UE Status High CSR */
42c8b11e 91static const char * const ue_status_hi_desc[] = {
7c185276
AK
92 "LPCMEMHOST",
93 "MGMT_MAC",
94 "PCS0ONLINE",
95 "MPU_IRAM",
96 "PCS1ONLINE",
97 "PCTL0",
98 "PCTL1",
99 "PMEM",
100 "RR",
101 "TXPB",
102 "RXPP",
103 "XAUI",
104 "TXP",
105 "ARM",
106 "IPC",
107 "HOST2",
108 "HOST3",
109 "HOST4",
110 "HOST5",
111 "HOST6",
112 "HOST7",
6bdf8f55
VV
113 "ECRC",
114 "Poison TLP",
42c8b11e 115 "NETC",
6bdf8f55
VV
116 "PERIPH",
117 "LLTXULP",
118 "D2P",
119 "RCON",
120 "LDMA",
121 "LLTXP",
122 "LLTXPB",
7c185276
AK
123 "Unknown"
124};
6b7c5b94
SP
125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
03d28ffe 129
1cfafab9 130 if (mem->va) {
2b7bcebf
IV
131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
1cfafab9
SP
133 mem->va = NULL;
134 }
6b7c5b94
SP
135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 138 u16 len, u16 entry_size)
6b7c5b94
SP
139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
ede23fa8
JP
146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
6b7c5b94 148 if (!mem->va)
10ef9ab4 149 return -ENOMEM;
6b7c5b94
SP
150 return 0;
151}
152
68c45a2d 153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 154{
db3ea781 155 u32 reg, enabled;
5f0b849e 156
db3ea781 157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 158 &reg);
db3ea781
SP
159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
5f0b849e 161 if (!enabled && enable)
6b7c5b94 162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 163 else if (enabled && !enable)
6b7c5b94 164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else
6b7c5b94 166 return;
5f0b849e 167
db3ea781 168 pci_write_config_dword(adapter->pdev,
748b539a 169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
170}
171
68c45a2d
SK
172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
8788fdc2 188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
189{
190 u32 val = 0;
03d28ffe 191
6b7c5b94
SP
192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
194
195 wmb();
8788fdc2 196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
197}
198
94d73aaa
VV
199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
6b7c5b94
SP
201{
202 u32 val = 0;
03d28ffe 203
94d73aaa 204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
206
207 wmb();
94d73aaa 208 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
209}
210
8788fdc2 211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
748b539a 212 bool arm, bool clear_int, u16 num_popped)
6b7c5b94
SP
213{
214 u32 val = 0;
03d28ffe 215
6b7c5b94 216 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 218
f67ef7ba 219 if (adapter->eeh_error)
cf588477
SP
220 return;
221
6b7c5b94
SP
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
229}
230
8788fdc2 231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
232{
233 u32 val = 0;
03d28ffe 234
6b7c5b94 235 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 238
f67ef7ba 239 if (adapter->eeh_error)
cf588477
SP
240 return;
241
6b7c5b94
SP
242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
246}
247
6b7c5b94
SP
248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 251 struct device *dev = &adapter->pdev->dev;
6b7c5b94 252 struct sockaddr *addr = p;
5a712c13
SP
253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 256
ca9e4988
AK
257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
ff32f8ab
VV
260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
5a712c13
SP
266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
704e4c88 271 */
5a712c13
SP
272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
704e4c88
PR
283 }
284
5a712c13
SP
285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
704e4c88 287 */
b188f090
SR
288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
a65027e4 290 if (status)
e3a7ae2c 291 goto err;
6b7c5b94 292
5a712c13
SP
293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
61d23e9f 296 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
297 status = -EPERM;
298 goto err;
299 }
300
e3a7ae2c 301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 302 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
303 return 0;
304err:
5a712c13 305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
306 return status;
307}
308
ca34fe38
SP
309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
61000861 316 } else if (BE3_chip(adapter)) {
ca34fe38
SP
317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
61000861
AK
319 return &cmd->hw_stats;
320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
ca34fe38
SP
323 return &cmd->hw_stats;
324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
61000861 334 } else if (BE3_chip(adapter)) {
ca34fe38
SP
335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
61000861
AK
337 return &hw_stats->erx;
338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
ca34fe38
SP
341 return &hw_stats->erx;
342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 346{
ac124ff9
SP
347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 350 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 353
ac124ff9 354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
89a88ab8
AK
375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
ac124ff9 382 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 383 else
ac124ff9 384 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
ca34fe38 394static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 395{
ac124ff9
SP
396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 399 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 402
ac124ff9 403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
ac124ff9 426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
61000861
AK
440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 484 if (be_roce_supported(adapter)) {
461ae379
AK
485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
61000861
AK
492}
493
005d5696
SX
494static void populate_lancer_stats(struct be_adapter *adapter)
495{
005d5696 496 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
ac124ff9 520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 524 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 527 drvs->rx_drops_too_many_frags =
ac124ff9 528 pport_stats->rx_drops_too_many_frags_lo;
005d5696 529}
89a88ab8 530
09c1c68f
SP
531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
4188e7df 543static void populate_erx_stats(struct be_adapter *adapter,
748b539a 544 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
89a88ab8
AK
556void be_parse_stats(struct be_adapter *adapter)
557{
61000861 558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
559 struct be_rx_obj *rxo;
560 int i;
a6c578ef 561 u32 erx_stat;
ac124ff9 562
ca34fe38
SP
563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
005d5696 565 } else {
ca34fe38
SP
566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
61000861
AK
568 else if (BE3_chip(adapter))
569 /* for BE3 */
ca34fe38 570 populate_be_v1_stats(adapter);
61000861
AK
571 else
572 populate_be_v2_stats(adapter);
d51ebd33 573
61000861 574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 575 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 578 }
09c1c68f 579 }
89a88ab8
AK
580}
581
ab1594e9 582static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 583 struct rtnl_link_stats64 *stats)
6b7c5b94 584{
ab1594e9 585 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 586 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 587 struct be_rx_obj *rxo;
3c8def97 588 struct be_tx_obj *txo;
ab1594e9
SP
589 u64 pkts, bytes;
590 unsigned int start;
3abcdeda 591 int i;
6b7c5b94 592
3abcdeda 593 for_all_rx_queues(adapter, rxo, i) {
ab1594e9 594 const struct be_rx_stats *rx_stats = rx_stats(rxo);
03d28ffe 595
ab1594e9 596 do {
57a7744e 597 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
598 pkts = rx_stats(rxo)->rx_pkts;
599 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 600 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
601 stats->rx_packets += pkts;
602 stats->rx_bytes += bytes;
603 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
604 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
605 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
606 }
607
3c8def97 608 for_all_tx_queues(adapter, txo, i) {
ab1594e9 609 const struct be_tx_stats *tx_stats = tx_stats(txo);
03d28ffe 610
ab1594e9 611 do {
57a7744e 612 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
613 pkts = tx_stats(txo)->tx_pkts;
614 bytes = tx_stats(txo)->tx_bytes;
57a7744e 615 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
616 stats->tx_packets += pkts;
617 stats->tx_bytes += bytes;
3c8def97 618 }
6b7c5b94
SP
619
620 /* bad pkts received */
ab1594e9 621 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
622 drvs->rx_alignment_symbol_errors +
623 drvs->rx_in_range_errors +
624 drvs->rx_out_range_errors +
625 drvs->rx_frame_too_long +
626 drvs->rx_dropped_too_small +
627 drvs->rx_dropped_too_short +
628 drvs->rx_dropped_header_too_small +
629 drvs->rx_dropped_tcp_length +
ab1594e9 630 drvs->rx_dropped_runt;
68110868 631
6b7c5b94 632 /* detailed rx errors */
ab1594e9 633 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long;
68110868 636
ab1594e9 637 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
638
639 /* frame alignment errors */
ab1594e9 640 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 641
6b7c5b94
SP
642 /* receiver fifo overrun */
643 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 644 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
645 drvs->rx_input_fifo_overflow_drop +
646 drvs->rx_drops_no_pbuf;
ab1594e9 647 return stats;
6b7c5b94
SP
648}
649
b236916a 650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 651{
6b7c5b94
SP
652 struct net_device *netdev = adapter->netdev;
653
b236916a 654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 655 netif_carrier_off(netdev);
b236916a 656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 657 }
b236916a 658
bdce2ad7 659 if (link_status)
b236916a
AK
660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
6b7c5b94
SP
663}
664
3c8def97 665static void be_tx_stats_update(struct be_tx_obj *txo,
748b539a
SP
666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
6b7c5b94 668{
3c8def97
SP
669 struct be_tx_stats *stats = tx_stats(txo);
670
ab1594e9 671 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 676 if (stopped)
ac124ff9 677 stats->tx_stops++;
ab1594e9 678 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
679}
680
681/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38 682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
748b539a 683 bool *dummy)
6b7c5b94 684{
ebc8d2ab
DM
685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
6b7c5b94
SP
689 /* to account for hdr wrb */
690 cnt++;
fe6d2a38
SP
691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
6b7c5b94
SP
694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
fe6d2a38 697 }
6b7c5b94
SP
698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 707 wrb->rsvd0 = 0;
6b7c5b94
SP
708}
709
1ded132d 710static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 711 struct sk_buff *skb)
1ded132d
AK
712{
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724}
725
c9c47142
SP
726/* Used only for IP tunnel packets */
727static u16 skb_inner_ip_proto(struct sk_buff *skb)
728{
729 return (inner_ip_hdr(skb)->version == 4) ?
730 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
731}
732
733static u16 skb_ip_proto(struct sk_buff *skb)
734{
735 return (ip_hdr(skb)->version == 4) ?
736 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
737}
738
cc4ce020 739static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
748b539a
SP
740 struct sk_buff *skb, u32 wrb_cnt, u32 len,
741 bool skip_hw_vlan)
6b7c5b94 742{
c9c47142 743 u16 vlan_tag, proto;
cc4ce020 744
6b7c5b94
SP
745 memset(hdr, 0, sizeof(*hdr));
746
c3c18bc1 747 SET_TX_WRB_HDR_BITS(crc, hdr, 1);
6b7c5b94 748
49e4b847 749 if (skb_is_gso(skb)) {
c3c18bc1
SP
750 SET_TX_WRB_HDR_BITS(lso, hdr, 1);
751 SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 752 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
c3c18bc1 753 SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
6b7c5b94 754 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142 755 if (skb->encapsulation) {
c3c18bc1 756 SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
c9c47142
SP
757 proto = skb_inner_ip_proto(skb);
758 } else {
759 proto = skb_ip_proto(skb);
760 }
761 if (proto == IPPROTO_TCP)
c3c18bc1 762 SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
c9c47142 763 else if (proto == IPPROTO_UDP)
c3c18bc1 764 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
6b7c5b94
SP
765 }
766
4c5102f9 767 if (vlan_tx_tag_present(skb)) {
c3c18bc1 768 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
1ded132d 769 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
c3c18bc1 770 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
771 }
772
bc0c3405 773 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
c3c18bc1
SP
774 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
775 SET_TX_WRB_HDR_BITS(event, hdr, 1);
776 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
777 SET_TX_WRB_HDR_BITS(len, hdr, len);
6b7c5b94
SP
778}
779
2b7bcebf 780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 781 bool unmap_single)
7101e111
SP
782{
783 dma_addr_t dma;
784
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 788 if (wrb->frag_len) {
7101e111 789 if (unmap_single)
2b7bcebf
IV
790 dma_unmap_single(dev, dma, wrb->frag_len,
791 DMA_TO_DEVICE);
7101e111 792 else
2b7bcebf 793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
794 }
795}
6b7c5b94 796
3c8def97 797static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
748b539a
SP
798 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
799 bool skip_hw_vlan)
6b7c5b94 800{
7101e111
SP
801 dma_addr_t busaddr;
802 int i, copied = 0;
2b7bcebf 803 struct device *dev = &adapter->pdev->dev;
6b7c5b94 804 struct sk_buff *first_skb = skb;
6b7c5b94
SP
805 struct be_eth_wrb *wrb;
806 struct be_eth_hdr_wrb *hdr;
7101e111
SP
807 bool map_single = false;
808 u16 map_head;
6b7c5b94 809
6b7c5b94
SP
810 hdr = queue_head_node(txq);
811 queue_head_inc(txq);
7101e111 812 map_head = txq->head;
6b7c5b94 813
ebc8d2ab 814 if (skb->len > skb->data_len) {
e743d313 815 int len = skb_headlen(skb);
03d28ffe 816
2b7bcebf
IV
817 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
818 if (dma_mapping_error(dev, busaddr))
7101e111
SP
819 goto dma_err;
820 map_single = true;
ebc8d2ab
DM
821 wrb = queue_head_node(txq);
822 wrb_fill(wrb, busaddr, len);
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq);
825 copied += len;
826 }
6b7c5b94 827
ebc8d2ab 828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
748b539a 829 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
03d28ffe 830
b061b39e 831 busaddr = skb_frag_dma_map(dev, frag, 0,
9e903e08 832 skb_frag_size(frag), DMA_TO_DEVICE);
2b7bcebf 833 if (dma_mapping_error(dev, busaddr))
7101e111 834 goto dma_err;
ebc8d2ab 835 wrb = queue_head_node(txq);
9e903e08 836 wrb_fill(wrb, busaddr, skb_frag_size(frag));
ebc8d2ab
DM
837 be_dws_cpu_to_le(wrb, sizeof(*wrb));
838 queue_head_inc(txq);
9e903e08 839 copied += skb_frag_size(frag);
6b7c5b94
SP
840 }
841
842 if (dummy_wrb) {
843 wrb = queue_head_node(txq);
844 wrb_fill(wrb, 0, 0);
845 be_dws_cpu_to_le(wrb, sizeof(*wrb));
846 queue_head_inc(txq);
847 }
848
bc0c3405 849 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
6b7c5b94
SP
850 be_dws_cpu_to_le(hdr, sizeof(*hdr));
851
852 return copied;
7101e111
SP
853dma_err:
854 txq->head = map_head;
855 while (copied) {
856 wrb = queue_head_node(txq);
2b7bcebf 857 unmap_tx_frag(dev, wrb, map_single);
7101e111
SP
858 map_single = false;
859 copied -= wrb->frag_len;
d3de1540 860 adapter->drv_stats.dma_map_errors++;
7101e111
SP
861 queue_head_inc(txq);
862 }
863 return 0;
6b7c5b94
SP
864}
865
93040ae5 866static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
bc0c3405
AK
867 struct sk_buff *skb,
868 bool *skip_hw_vlan)
93040ae5
SK
869{
870 u16 vlan_tag = 0;
871
872 skb = skb_share_check(skb, GFP_ATOMIC);
873 if (unlikely(!skb))
874 return skb;
875
efee8e87 876 if (vlan_tx_tag_present(skb))
93040ae5 877 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
52fe29e4
SB
878
879 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
880 if (!vlan_tag)
881 vlan_tag = adapter->pvid;
882 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
883 * skip VLAN insertion
884 */
885 if (skip_hw_vlan)
886 *skip_hw_vlan = true;
887 }
bc0c3405
AK
888
889 if (vlan_tag) {
62749e2c
JP
890 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
891 vlan_tag);
bc0c3405
AK
892 if (unlikely(!skb))
893 return skb;
bc0c3405
AK
894 skb->vlan_tci = 0;
895 }
896
897 /* Insert the outer VLAN, if any */
898 if (adapter->qnq_vid) {
899 vlan_tag = adapter->qnq_vid;
62749e2c
JP
900 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
901 vlan_tag);
bc0c3405
AK
902 if (unlikely(!skb))
903 return skb;
904 if (skip_hw_vlan)
905 *skip_hw_vlan = true;
906 }
907
93040ae5
SK
908 return skb;
909}
910
bc0c3405
AK
911static bool be_ipv6_exthdr_check(struct sk_buff *skb)
912{
913 struct ethhdr *eh = (struct ethhdr *)skb->data;
914 u16 offset = ETH_HLEN;
915
916 if (eh->h_proto == htons(ETH_P_IPV6)) {
917 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
918
919 offset += sizeof(struct ipv6hdr);
920 if (ip6h->nexthdr != NEXTHDR_TCP &&
921 ip6h->nexthdr != NEXTHDR_UDP) {
922 struct ipv6_opt_hdr *ehdr =
504fbf1e 923 (struct ipv6_opt_hdr *)(skb->data + offset);
bc0c3405
AK
924
925 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
926 if (ehdr->hdrlen == 0xff)
927 return true;
928 }
929 }
930 return false;
931}
932
933static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
934{
935 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
936}
937
748b539a 938static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 939{
ee9c799c 940 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
941}
942
ec495fac
VV
943static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
944 struct sk_buff *skb,
945 bool *skip_hw_vlan)
6b7c5b94 946{
d2cb6ce7 947 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
ee9c799c
SP
948 unsigned int eth_hdr_len;
949 struct iphdr *ip;
93040ae5 950
1297f9db
AK
951 /* For padded packets, BE HW modifies tot_len field in IP header
952 * incorrecly when VLAN tag is inserted by HW.
3904dcc4 953 * For padded packets, Lancer computes incorrect checksum.
1ded132d 954 */
ee9c799c
SP
955 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
956 VLAN_ETH_HLEN : ETH_HLEN;
3904dcc4
SK
957 if (skb->len <= 60 &&
958 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
ee9c799c 959 is_ipv4_pkt(skb)) {
93040ae5
SK
960 ip = (struct iphdr *)ip_hdr(skb);
961 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
962 }
1ded132d 963
d2cb6ce7 964 /* If vlan tag is already inlined in the packet, skip HW VLAN
f93f160b 965 * tagging in pvid-tagging mode
d2cb6ce7 966 */
f93f160b 967 if (be_pvid_tagging_enabled(adapter) &&
d2cb6ce7 968 veh->h_vlan_proto == htons(ETH_P_8021Q))
748b539a 969 *skip_hw_vlan = true;
d2cb6ce7 970
93040ae5
SK
971 /* HW has a bug wherein it will calculate CSUM for VLAN
972 * pkts even though it is disabled.
973 * Manually insert VLAN in pkt.
974 */
975 if (skb->ip_summed != CHECKSUM_PARTIAL &&
ee9c799c
SP
976 vlan_tx_tag_present(skb)) {
977 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
bc0c3405 978 if (unlikely(!skb))
c9128951 979 goto err;
bc0c3405
AK
980 }
981
982 /* HW may lockup when VLAN HW tagging is requested on
983 * certain ipv6 packets. Drop such pkts if the HW workaround to
984 * skip HW tagging is not enabled by FW.
985 */
986 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
cd3307aa
KA
987 (adapter->pvid || adapter->qnq_vid) &&
988 !qnq_async_evt_rcvd(adapter)))
bc0c3405
AK
989 goto tx_drop;
990
991 /* Manual VLAN tag insertion to prevent:
992 * ASIC lockup when the ASIC inserts VLAN tag into
993 * certain ipv6 packets. Insert VLAN tags in driver,
994 * and set event, completion, vlan bits accordingly
995 * in the Tx WRB.
996 */
997 if (be_ipv6_tx_stall_chk(adapter, skb) &&
998 be_vlan_tag_tx_chk(adapter, skb)) {
ee9c799c 999 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
1ded132d 1000 if (unlikely(!skb))
c9128951 1001 goto err;
1ded132d
AK
1002 }
1003
ee9c799c
SP
1004 return skb;
1005tx_drop:
1006 dev_kfree_skb_any(skb);
c9128951 1007err:
ee9c799c
SP
1008 return NULL;
1009}
1010
ec495fac
VV
1011static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1012 struct sk_buff *skb,
1013 bool *skip_hw_vlan)
1014{
1015 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1016 * less may cause a transmit stall on that port. So the work-around is
1017 * to pad short packets (<= 32 bytes) to a 36-byte length.
1018 */
1019 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
74b6939d 1020 if (skb_put_padto(skb, 36))
ec495fac 1021 return NULL;
ec495fac
VV
1022 }
1023
1024 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1025 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1026 if (!skb)
1027 return NULL;
1028 }
1029
1030 return skb;
1031}
1032
ee9c799c
SP
1033static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1034{
1035 struct be_adapter *adapter = netdev_priv(netdev);
1036 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1037 struct be_queue_info *txq = &txo->q;
1038 bool dummy_wrb, stopped = false;
1039 u32 wrb_cnt = 0, copied = 0;
1040 bool skip_hw_vlan = false;
1041 u32 start = txq->head;
1042
1043 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
bc617526
SP
1044 if (!skb) {
1045 tx_stats(txo)->tx_drv_drops++;
ee9c799c 1046 return NETDEV_TX_OK;
bc617526 1047 }
ee9c799c 1048
fe6d2a38 1049 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 1050
bc0c3405
AK
1051 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1052 skip_hw_vlan);
c190e3c8 1053 if (copied) {
cd8f76c0
ED
1054 int gso_segs = skb_shinfo(skb)->gso_segs;
1055
c190e3c8 1056 /* record the sent skb in the sent_skb table */
3c8def97
SP
1057 BUG_ON(txo->sent_skb_list[start]);
1058 txo->sent_skb_list[start] = skb;
c190e3c8
AK
1059
1060 /* Ensure txq has space for the next skb; Else stop the queue
1061 * *BEFORE* ringing the tx doorbell, so that we serialze the
1062 * tx compls of the current transmit which'll wake up the queue
1063 */
7101e111 1064 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
1065 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1066 txq->len) {
3c8def97 1067 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
1068 stopped = true;
1069 }
6b7c5b94 1070
94d73aaa 1071 be_txq_notify(adapter, txo, wrb_cnt);
6b7c5b94 1072
cd8f76c0 1073 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
c190e3c8
AK
1074 } else {
1075 txq->head = start;
bc617526 1076 tx_stats(txo)->tx_drv_drops++;
c190e3c8 1077 dev_kfree_skb_any(skb);
6b7c5b94 1078 }
6b7c5b94
SP
1079 return NETDEV_TX_OK;
1080}
1081
1082static int be_change_mtu(struct net_device *netdev, int new_mtu)
1083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
0d3f5cce
KA
1085 struct device *dev = &adapter->pdev->dev;
1086
1087 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1088 dev_info(dev, "MTU must be between %d and %d bytes\n",
1089 BE_MIN_MTU, BE_MAX_MTU);
6b7c5b94
SP
1090 return -EINVAL;
1091 }
0d3f5cce
KA
1092
1093 dev_info(dev, "MTU changed from %d to %d bytes\n",
748b539a 1094 netdev->mtu, new_mtu);
6b7c5b94
SP
1095 netdev->mtu = new_mtu;
1096 return 0;
1097}
1098
1099/*
82903e4b
AK
1100 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1101 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 1102 */
10329df8 1103static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 1104{
50762667 1105 struct device *dev = &adapter->pdev->dev;
10329df8 1106 u16 vids[BE_NUM_VLANS_SUPPORTED];
f6cbd364 1107 u16 num = 0, i = 0;
82903e4b 1108 int status = 0;
1da87b7f 1109
c0e64ef4
SP
1110 /* No need to further configure vids if in promiscuous mode */
1111 if (adapter->promiscuous)
1112 return 0;
1113
92bf14ab 1114 if (adapter->vlans_added > be_max_vlans(adapter))
0fc16ebf
PR
1115 goto set_vlan_promisc;
1116
1117 /* Construct VLAN Table to give to HW */
f6cbd364
RN
1118 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1119 vids[num++] = cpu_to_le16(i);
0fc16ebf 1120
4d567d97 1121 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
0fc16ebf 1122 if (status) {
d9d604f8 1123 /* Set to VLAN promisc mode as setting VLAN filter failed */
4c60005f
KA
1124 if (addl_status(status) ==
1125 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
d9d604f8 1126 goto set_vlan_promisc;
50762667 1127 dev_err(dev, "Setting HW VLAN filtering failed\n");
d9d604f8
AK
1128 } else {
1129 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1130 /* hw VLAN filtering re-enabled. */
1131 status = be_cmd_rx_filter(adapter,
1132 BE_FLAGS_VLAN_PROMISC, OFF);
1133 if (!status) {
50762667
VV
1134 dev_info(dev,
1135 "Disabling VLAN Promiscuous mode\n");
d9d604f8 1136 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
d9d604f8
AK
1137 }
1138 }
6b7c5b94 1139 }
1da87b7f 1140
b31c50a7 1141 return status;
0fc16ebf
PR
1142
1143set_vlan_promisc:
a6b74e01
SK
1144 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1145 return 0;
d9d604f8
AK
1146
1147 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1148 if (!status) {
50762667 1149 dev_info(dev, "Enable VLAN Promiscuous mode\n");
d9d604f8
AK
1150 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1151 } else
50762667 1152 dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
0fc16ebf 1153 return status;
6b7c5b94
SP
1154}
1155
80d5c368 1156static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1157{
1158 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1159 int status = 0;
6b7c5b94 1160
a85e9986
PR
1161 /* Packets with VID 0 are always received by Lancer by default */
1162 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1163 return status;
1164
f6cbd364 1165 if (test_bit(vid, adapter->vids))
48291c22 1166 return status;
a85e9986 1167
f6cbd364 1168 set_bit(vid, adapter->vids);
a6b74e01 1169 adapter->vlans_added++;
8e586137 1170
a6b74e01
SK
1171 status = be_vid_config(adapter);
1172 if (status) {
1173 adapter->vlans_added--;
f6cbd364 1174 clear_bit(vid, adapter->vids);
a6b74e01 1175 }
48291c22 1176
80817cbf 1177 return status;
6b7c5b94
SP
1178}
1179
80d5c368 1180static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1181{
1182 struct be_adapter *adapter = netdev_priv(netdev);
1183
a85e9986
PR
1184 /* Packets with VID 0 are always received by Lancer by default */
1185 if (lancer_chip(adapter) && vid == 0)
9d4dfe4a 1186 return 0;
a85e9986 1187
f6cbd364 1188 clear_bit(vid, adapter->vids);
9d4dfe4a
KA
1189 adapter->vlans_added--;
1190
1191 return be_vid_config(adapter);
6b7c5b94
SP
1192}
1193
7ad09458
S
1194static void be_clear_promisc(struct be_adapter *adapter)
1195{
1196 adapter->promiscuous = false;
a0794885 1197 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
7ad09458
S
1198
1199 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1200}
1201
a54769f5 1202static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
1203{
1204 struct be_adapter *adapter = netdev_priv(netdev);
0fc16ebf 1205 int status;
6b7c5b94 1206
24307eef 1207 if (netdev->flags & IFF_PROMISC) {
5b8821b7 1208 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
1209 adapter->promiscuous = true;
1210 goto done;
6b7c5b94
SP
1211 }
1212
25985edc 1213 /* BE was previously in promiscuous mode; disable it */
24307eef 1214 if (adapter->promiscuous) {
7ad09458 1215 be_clear_promisc(adapter);
c0e64ef4 1216 if (adapter->vlans_added)
10329df8 1217 be_vid_config(adapter);
6b7c5b94
SP
1218 }
1219
e7b909a6 1220 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 1221 if (netdev->flags & IFF_ALLMULTI ||
a0794885
KA
1222 netdev_mc_count(netdev) > be_max_mc(adapter))
1223 goto set_mcast_promisc;
6b7c5b94 1224
fbc13f01
AK
1225 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1226 struct netdev_hw_addr *ha;
1227 int i = 1; /* First slot is claimed by the Primary MAC */
1228
1229 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1230 be_cmd_pmac_del(adapter, adapter->if_handle,
1231 adapter->pmac_id[i], 0);
1232 }
1233
92bf14ab 1234 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
fbc13f01
AK
1235 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1236 adapter->promiscuous = true;
1237 goto done;
1238 }
1239
1240 netdev_for_each_uc_addr(ha, adapter->netdev) {
1241 adapter->uc_macs++; /* First slot is for Primary MAC */
1242 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1243 adapter->if_handle,
1244 &adapter->pmac_id[adapter->uc_macs], 0);
1245 }
1246 }
1247
0fc16ebf 1248 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
a0794885
KA
1249 if (!status) {
1250 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1251 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1252 goto done;
0fc16ebf 1253 }
a0794885
KA
1254
1255set_mcast_promisc:
1256 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1257 return;
1258
1259 /* Set to MCAST promisc mode if setting MULTICAST address fails
1260 * or if num configured exceeds what we support
1261 */
1262 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1263 if (!status)
1264 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
24307eef
SP
1265done:
1266 return;
6b7c5b94
SP
1267}
1268
ba343c77
SB
1269static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1270{
1271 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1272 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1273 int status;
1274
11ac75ed 1275 if (!sriov_enabled(adapter))
ba343c77
SB
1276 return -EPERM;
1277
11ac75ed 1278 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1279 return -EINVAL;
1280
3c31aaf3
VV
1281 /* Proceed further only if user provided MAC is different
1282 * from active MAC
1283 */
1284 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1285 return 0;
1286
3175d8c2
SP
1287 if (BEx_chip(adapter)) {
1288 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1289 vf + 1);
ba343c77 1290
11ac75ed
SP
1291 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1292 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1293 } else {
1294 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1295 vf + 1);
590c391d
PR
1296 }
1297
abccf23e
KA
1298 if (status) {
1299 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1300 mac, vf, status);
1301 return be_cmd_status(status);
1302 }
64600ea5 1303
abccf23e
KA
1304 ether_addr_copy(vf_cfg->mac_addr, mac);
1305
1306 return 0;
ba343c77
SB
1307}
1308
64600ea5 1309static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1310 struct ifla_vf_info *vi)
64600ea5
AK
1311{
1312 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1313 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1314
11ac75ed 1315 if (!sriov_enabled(adapter))
64600ea5
AK
1316 return -EPERM;
1317
11ac75ed 1318 if (vf >= adapter->num_vfs)
64600ea5
AK
1319 return -EINVAL;
1320
1321 vi->vf = vf;
ed616689
SC
1322 vi->max_tx_rate = vf_cfg->tx_rate;
1323 vi->min_tx_rate = 0;
a60b3a13
AK
1324 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1325 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1326 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1327 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
64600ea5
AK
1328
1329 return 0;
1330}
1331
748b539a 1332static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1333{
1334 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1335 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1da87b7f
AK
1336 int status = 0;
1337
11ac75ed 1338 if (!sriov_enabled(adapter))
1da87b7f
AK
1339 return -EPERM;
1340
b9fc0e53 1341 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1342 return -EINVAL;
1343
b9fc0e53
AK
1344 if (vlan || qos) {
1345 vlan |= qos << VLAN_PRIO_SHIFT;
c502224e 1346 if (vf_cfg->vlan_tag != vlan)
b9fc0e53
AK
1347 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1348 vf_cfg->if_handle, 0);
1da87b7f 1349 } else {
f1f3ee1b 1350 /* Reset Transparent Vlan Tagging. */
c502224e
SK
1351 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1352 vf + 1, vf_cfg->if_handle, 0);
1da87b7f
AK
1353 }
1354
abccf23e
KA
1355 if (status) {
1356 dev_err(&adapter->pdev->dev,
1357 "VLAN %d config on VF %d failed : %#x\n", vlan,
1358 vf, status);
1359 return be_cmd_status(status);
1360 }
1361
1362 vf_cfg->vlan_tag = vlan;
1363
1364 return 0;
1da87b7f
AK
1365}
1366
ed616689
SC
1367static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1368 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1369{
1370 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1371 struct device *dev = &adapter->pdev->dev;
1372 int percent_rate, status = 0;
1373 u16 link_speed = 0;
1374 u8 link_status;
e1d18735 1375
11ac75ed 1376 if (!sriov_enabled(adapter))
e1d18735
AK
1377 return -EPERM;
1378
94f434c2 1379 if (vf >= adapter->num_vfs)
e1d18735
AK
1380 return -EINVAL;
1381
ed616689
SC
1382 if (min_tx_rate)
1383 return -EINVAL;
1384
0f77ba73
RN
1385 if (!max_tx_rate)
1386 goto config_qos;
1387
1388 status = be_cmd_link_status_query(adapter, &link_speed,
1389 &link_status, 0);
1390 if (status)
1391 goto err;
1392
1393 if (!link_status) {
1394 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1395 status = -ENETDOWN;
0f77ba73
RN
1396 goto err;
1397 }
1398
1399 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1400 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1401 link_speed);
1402 status = -EINVAL;
1403 goto err;
1404 }
1405
1406 /* On Skyhawk the QOS setting must be done only as a % value */
1407 percent_rate = link_speed / 100;
1408 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1409 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1410 percent_rate);
1411 status = -EINVAL;
1412 goto err;
94f434c2 1413 }
e1d18735 1414
0f77ba73
RN
1415config_qos:
1416 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1417 if (status)
0f77ba73
RN
1418 goto err;
1419
1420 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1421 return 0;
1422
1423err:
1424 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1425 max_tx_rate, vf);
abccf23e 1426 return be_cmd_status(status);
e1d18735 1427}
e2fb1afa 1428
bdce2ad7
SR
1429static int be_set_vf_link_state(struct net_device *netdev, int vf,
1430 int link_state)
1431{
1432 struct be_adapter *adapter = netdev_priv(netdev);
1433 int status;
1434
1435 if (!sriov_enabled(adapter))
1436 return -EPERM;
1437
1438 if (vf >= adapter->num_vfs)
1439 return -EINVAL;
1440
1441 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1442 if (status) {
1443 dev_err(&adapter->pdev->dev,
1444 "Link state change on VF %d failed: %#x\n", vf, status);
1445 return be_cmd_status(status);
1446 }
bdce2ad7 1447
abccf23e
KA
1448 adapter->vf_cfg[vf].plink_tracking = link_state;
1449
1450 return 0;
bdce2ad7 1451}
e1d18735 1452
2632bafd
SP
1453static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1454 ulong now)
6b7c5b94 1455{
2632bafd
SP
1456 aic->rx_pkts_prev = rx_pkts;
1457 aic->tx_reqs_prev = tx_pkts;
1458 aic->jiffies = now;
1459}
ac124ff9 1460
2632bafd
SP
1461static void be_eqd_update(struct be_adapter *adapter)
1462{
1463 struct be_set_eqd set_eqd[MAX_EVT_QS];
1464 int eqd, i, num = 0, start;
1465 struct be_aic_obj *aic;
1466 struct be_eq_obj *eqo;
1467 struct be_rx_obj *rxo;
1468 struct be_tx_obj *txo;
1469 u64 rx_pkts, tx_pkts;
1470 ulong now;
1471 u32 pps, delta;
10ef9ab4 1472
2632bafd
SP
1473 for_all_evt_queues(adapter, eqo, i) {
1474 aic = &adapter->aic_obj[eqo->idx];
1475 if (!aic->enable) {
1476 if (aic->jiffies)
1477 aic->jiffies = 0;
1478 eqd = aic->et_eqd;
1479 goto modify_eqd;
1480 }
6b7c5b94 1481
2632bafd
SP
1482 rxo = &adapter->rx_obj[eqo->idx];
1483 do {
57a7744e 1484 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1485 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1486 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1487
2632bafd
SP
1488 txo = &adapter->tx_obj[eqo->idx];
1489 do {
57a7744e 1490 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1491 tx_pkts = txo->stats.tx_reqs;
57a7744e 1492 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1493
2632bafd
SP
1494 /* Skip, if wrapped around or first calculation */
1495 now = jiffies;
1496 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1497 rx_pkts < aic->rx_pkts_prev ||
1498 tx_pkts < aic->tx_reqs_prev) {
1499 be_aic_update(aic, rx_pkts, tx_pkts, now);
1500 continue;
1501 }
1502
1503 delta = jiffies_to_msecs(now - aic->jiffies);
1504 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1505 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1506 eqd = (pps / 15000) << 2;
10ef9ab4 1507
2632bafd
SP
1508 if (eqd < 8)
1509 eqd = 0;
1510 eqd = min_t(u32, eqd, aic->max_eqd);
1511 eqd = max_t(u32, eqd, aic->min_eqd);
1512
1513 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1514modify_eqd:
2632bafd
SP
1515 if (eqd != aic->prev_eqd) {
1516 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1517 set_eqd[num].eq_id = eqo->q.id;
1518 aic->prev_eqd = eqd;
1519 num++;
1520 }
ac124ff9 1521 }
2632bafd
SP
1522
1523 if (num)
1524 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1525}
1526
3abcdeda 1527static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1528 struct be_rx_compl_info *rxcp)
4097f663 1529{
ac124ff9 1530 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1531
ab1594e9 1532 u64_stats_update_begin(&stats->sync);
3abcdeda 1533 stats->rx_compl++;
2e588f84 1534 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1535 stats->rx_pkts++;
2e588f84 1536 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1537 stats->rx_mcast_pkts++;
2e588f84 1538 if (rxcp->err)
ac124ff9 1539 stats->rx_compl_err++;
ab1594e9 1540 u64_stats_update_end(&stats->sync);
4097f663
SP
1541}
1542
2e588f84 1543static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1544{
19fad86f 1545 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1546 * Also ignore ipcksm for ipv6 pkts
1547 */
2e588f84 1548 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1549 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1550}
1551
0b0ef1d0 1552static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1553{
10ef9ab4 1554 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1555 struct be_rx_page_info *rx_page_info;
3abcdeda 1556 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1557 u16 frag_idx = rxq->tail;
6b7c5b94 1558
3abcdeda 1559 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1560 BUG_ON(!rx_page_info->page);
1561
e50287be 1562 if (rx_page_info->last_frag) {
2b7bcebf
IV
1563 dma_unmap_page(&adapter->pdev->dev,
1564 dma_unmap_addr(rx_page_info, bus),
1565 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1566 rx_page_info->last_frag = false;
1567 } else {
1568 dma_sync_single_for_cpu(&adapter->pdev->dev,
1569 dma_unmap_addr(rx_page_info, bus),
1570 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1571 }
6b7c5b94 1572
0b0ef1d0 1573 queue_tail_inc(rxq);
6b7c5b94
SP
1574 atomic_dec(&rxq->used);
1575 return rx_page_info;
1576}
1577
1578/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1579static void be_rx_compl_discard(struct be_rx_obj *rxo,
1580 struct be_rx_compl_info *rxcp)
6b7c5b94 1581{
6b7c5b94 1582 struct be_rx_page_info *page_info;
2e588f84 1583 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1584
e80d9da6 1585 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1586 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1587 put_page(page_info->page);
1588 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1589 }
1590}
1591
1592/*
1593 * skb_fill_rx_data forms a complete skb for an ether frame
1594 * indicated by rxcp.
1595 */
10ef9ab4
SP
1596static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1597 struct be_rx_compl_info *rxcp)
6b7c5b94 1598{
6b7c5b94 1599 struct be_rx_page_info *page_info;
2e588f84
SP
1600 u16 i, j;
1601 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1602 u8 *start;
6b7c5b94 1603
0b0ef1d0 1604 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1605 start = page_address(page_info->page) + page_info->page_offset;
1606 prefetch(start);
1607
1608 /* Copy data in the first descriptor of this completion */
2e588f84 1609 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1610
6b7c5b94
SP
1611 skb->len = curr_frag_len;
1612 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1613 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1614 /* Complete packet has now been moved to data */
1615 put_page(page_info->page);
1616 skb->data_len = 0;
1617 skb->tail += curr_frag_len;
1618 } else {
ac1ae5f3
ED
1619 hdr_len = ETH_HLEN;
1620 memcpy(skb->data, start, hdr_len);
6b7c5b94 1621 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1622 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1623 skb_shinfo(skb)->frags[0].page_offset =
1624 page_info->page_offset + hdr_len;
748b539a
SP
1625 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1626 curr_frag_len - hdr_len);
6b7c5b94 1627 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1628 skb->truesize += rx_frag_size;
6b7c5b94
SP
1629 skb->tail += hdr_len;
1630 }
205859a2 1631 page_info->page = NULL;
6b7c5b94 1632
2e588f84
SP
1633 if (rxcp->pkt_size <= rx_frag_size) {
1634 BUG_ON(rxcp->num_rcvd != 1);
1635 return;
6b7c5b94
SP
1636 }
1637
1638 /* More frags present for this completion */
2e588f84
SP
1639 remaining = rxcp->pkt_size - curr_frag_len;
1640 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1641 page_info = get_rx_page_info(rxo);
2e588f84 1642 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1643
bd46cb6c
AK
1644 /* Coalesce all frags from the same physical page in one slot */
1645 if (page_info->page_offset == 0) {
1646 /* Fresh page */
1647 j++;
b061b39e 1648 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1649 skb_shinfo(skb)->frags[j].page_offset =
1650 page_info->page_offset;
9e903e08 1651 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1652 skb_shinfo(skb)->nr_frags++;
1653 } else {
1654 put_page(page_info->page);
1655 }
1656
9e903e08 1657 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1658 skb->len += curr_frag_len;
1659 skb->data_len += curr_frag_len;
bdb28a97 1660 skb->truesize += rx_frag_size;
2e588f84 1661 remaining -= curr_frag_len;
205859a2 1662 page_info->page = NULL;
6b7c5b94 1663 }
bd46cb6c 1664 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1665}
1666
5be93b9a 1667/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1668static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1669 struct be_rx_compl_info *rxcp)
6b7c5b94 1670{
10ef9ab4 1671 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1672 struct net_device *netdev = adapter->netdev;
6b7c5b94 1673 struct sk_buff *skb;
89420424 1674
bb349bb4 1675 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1676 if (unlikely(!skb)) {
ac124ff9 1677 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1678 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1679 return;
1680 }
1681
10ef9ab4 1682 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1683
6332c8d3 1684 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1685 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1686 else
1687 skb_checksum_none_assert(skb);
6b7c5b94 1688
6332c8d3 1689 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1690 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1691 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1692 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1693
b6c0e89d 1694 skb->csum_level = rxcp->tunneled;
6384a4d0 1695 skb_mark_napi_id(skb, napi);
6b7c5b94 1696
343e43c0 1697 if (rxcp->vlanf)
86a9bad3 1698 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1699
1700 netif_receive_skb(skb);
6b7c5b94
SP
1701}
1702
5be93b9a 1703/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1704static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1705 struct napi_struct *napi,
1706 struct be_rx_compl_info *rxcp)
6b7c5b94 1707{
10ef9ab4 1708 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1709 struct be_rx_page_info *page_info;
5be93b9a 1710 struct sk_buff *skb = NULL;
2e588f84
SP
1711 u16 remaining, curr_frag_len;
1712 u16 i, j;
3968fa1e 1713
10ef9ab4 1714 skb = napi_get_frags(napi);
5be93b9a 1715 if (!skb) {
10ef9ab4 1716 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1717 return;
1718 }
1719
2e588f84
SP
1720 remaining = rxcp->pkt_size;
1721 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1722 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1723
1724 curr_frag_len = min(remaining, rx_frag_size);
1725
bd46cb6c
AK
1726 /* Coalesce all frags from the same physical page in one slot */
1727 if (i == 0 || page_info->page_offset == 0) {
1728 /* First frag or Fresh page */
1729 j++;
b061b39e 1730 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1731 skb_shinfo(skb)->frags[j].page_offset =
1732 page_info->page_offset;
9e903e08 1733 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1734 } else {
1735 put_page(page_info->page);
1736 }
9e903e08 1737 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1738 skb->truesize += rx_frag_size;
bd46cb6c 1739 remaining -= curr_frag_len;
6b7c5b94
SP
1740 memset(page_info, 0, sizeof(*page_info));
1741 }
bd46cb6c 1742 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1743
5be93b9a 1744 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1745 skb->len = rxcp->pkt_size;
1746 skb->data_len = rxcp->pkt_size;
5be93b9a 1747 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1748 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1749 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1750 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1751
b6c0e89d 1752 skb->csum_level = rxcp->tunneled;
6384a4d0 1753 skb_mark_napi_id(skb, napi);
5be93b9a 1754
343e43c0 1755 if (rxcp->vlanf)
86a9bad3 1756 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1757
10ef9ab4 1758 napi_gro_frags(napi);
2e588f84
SP
1759}
1760
10ef9ab4
SP
1761static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1762 struct be_rx_compl_info *rxcp)
2e588f84 1763{
c3c18bc1
SP
1764 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1765 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1766 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1767 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1768 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1769 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1770 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1771 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1772 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1773 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1774 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1775 if (rxcp->vlanf) {
c3c18bc1
SP
1776 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1777 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1778 }
c3c18bc1 1779 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1780 rxcp->tunneled =
c3c18bc1 1781 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1782}
1783
10ef9ab4
SP
1784static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1785 struct be_rx_compl_info *rxcp)
2e588f84 1786{
c3c18bc1
SP
1787 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1788 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1789 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1790 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1791 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1792 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1793 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1794 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1795 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1796 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1797 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1798 if (rxcp->vlanf) {
c3c18bc1
SP
1799 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1800 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1801 }
c3c18bc1
SP
1802 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1803 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1804}
1805
1806static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1807{
1808 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1809 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1810 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1811
2e588f84
SP
1812 /* For checking the valid bit it is Ok to use either definition as the
1813 * valid bit is at the same position in both v0 and v1 Rx compl */
1814 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1815 return NULL;
6b7c5b94 1816
2e588f84
SP
1817 rmb();
1818 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1819
2e588f84 1820 if (adapter->be3_native)
10ef9ab4 1821 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1822 else
10ef9ab4 1823 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1824
e38b1706
SK
1825 if (rxcp->ip_frag)
1826 rxcp->l4_csum = 0;
1827
15d72184 1828 if (rxcp->vlanf) {
f93f160b
VV
1829 /* In QNQ modes, if qnq bit is not set, then the packet was
1830 * tagged only with the transparent outer vlan-tag and must
1831 * not be treated as a vlan packet by host
1832 */
1833 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1834 rxcp->vlanf = 0;
6b7c5b94 1835
15d72184 1836 if (!lancer_chip(adapter))
3c709f8f 1837 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1838
939cf306 1839 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1840 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1841 rxcp->vlanf = 0;
1842 }
2e588f84
SP
1843
1844 /* As the compl has been parsed, reset it; we wont touch it again */
1845 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1846
3abcdeda 1847 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1848 return rxcp;
1849}
1850
1829b086 1851static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1852{
6b7c5b94 1853 u32 order = get_order(size);
1829b086 1854
6b7c5b94 1855 if (order > 0)
1829b086
ED
1856 gfp |= __GFP_COMP;
1857 return alloc_pages(gfp, order);
6b7c5b94
SP
1858}
1859
1860/*
1861 * Allocate a page, split it to fragments of size rx_frag_size and post as
1862 * receive buffers to BE
1863 */
c30d7266 1864static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1865{
3abcdeda 1866 struct be_adapter *adapter = rxo->adapter;
26d92f92 1867 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1868 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1869 struct page *pagep = NULL;
ba42fad0 1870 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1871 struct be_eth_rx_d *rxd;
1872 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1873 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1874
3abcdeda 1875 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1876 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1877 if (!pagep) {
1829b086 1878 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1879 if (unlikely(!pagep)) {
ac124ff9 1880 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1881 break;
1882 }
ba42fad0
IV
1883 page_dmaaddr = dma_map_page(dev, pagep, 0,
1884 adapter->big_page_size,
2b7bcebf 1885 DMA_FROM_DEVICE);
ba42fad0
IV
1886 if (dma_mapping_error(dev, page_dmaaddr)) {
1887 put_page(pagep);
1888 pagep = NULL;
d3de1540 1889 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1890 break;
1891 }
e50287be 1892 page_offset = 0;
6b7c5b94
SP
1893 } else {
1894 get_page(pagep);
e50287be 1895 page_offset += rx_frag_size;
6b7c5b94 1896 }
e50287be 1897 page_info->page_offset = page_offset;
6b7c5b94 1898 page_info->page = pagep;
6b7c5b94
SP
1899
1900 rxd = queue_head_node(rxq);
e50287be 1901 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1902 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1903 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1904
1905 /* Any space left in the current big page for another frag? */
1906 if ((page_offset + rx_frag_size + rx_frag_size) >
1907 adapter->big_page_size) {
1908 pagep = NULL;
e50287be
SP
1909 page_info->last_frag = true;
1910 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1911 } else {
1912 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1913 }
26d92f92
SP
1914
1915 prev_page_info = page_info;
1916 queue_head_inc(rxq);
10ef9ab4 1917 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1918 }
e50287be
SP
1919
1920 /* Mark the last frag of a page when we break out of the above loop
1921 * with no more slots available in the RXQ
1922 */
1923 if (pagep) {
1924 prev_page_info->last_frag = true;
1925 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1926 }
6b7c5b94
SP
1927
1928 if (posted) {
6b7c5b94 1929 atomic_add(posted, &rxq->used);
6384a4d0
SP
1930 if (rxo->rx_post_starved)
1931 rxo->rx_post_starved = false;
c30d7266
AK
1932 do {
1933 notify = min(256u, posted);
1934 be_rxq_notify(adapter, rxq->id, notify);
1935 posted -= notify;
1936 } while (posted);
ea1dae11
SP
1937 } else if (atomic_read(&rxq->used) == 0) {
1938 /* Let be_worker replenish when memory is available */
3abcdeda 1939 rxo->rx_post_starved = true;
6b7c5b94 1940 }
6b7c5b94
SP
1941}
1942
5fb379ee 1943static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1944{
6b7c5b94
SP
1945 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1946
1947 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1948 return NULL;
1949
f3eb62d2 1950 rmb();
6b7c5b94
SP
1951 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1952
1953 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1954
1955 queue_tail_inc(tx_cq);
1956 return txcp;
1957}
1958
3c8def97 1959static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1960 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1961{
3c8def97 1962 struct be_queue_info *txq = &txo->q;
a73b796e 1963 struct be_eth_wrb *wrb;
3c8def97 1964 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1965 struct sk_buff *sent_skb;
ec43b1a6
SP
1966 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1967 bool unmap_skb_hdr = true;
6b7c5b94 1968
ec43b1a6 1969 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1970 BUG_ON(!sent_skb);
ec43b1a6
SP
1971 sent_skbs[txq->tail] = NULL;
1972
1973 /* skip header wrb */
a73b796e 1974 queue_tail_inc(txq);
6b7c5b94 1975
ec43b1a6 1976 do {
6b7c5b94 1977 cur_index = txq->tail;
a73b796e 1978 wrb = queue_tail_node(txq);
2b7bcebf
IV
1979 unmap_tx_frag(&adapter->pdev->dev, wrb,
1980 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1981 unmap_skb_hdr = false;
1982
6b7c5b94
SP
1983 num_wrbs++;
1984 queue_tail_inc(txq);
ec43b1a6 1985 } while (cur_index != last_index);
6b7c5b94 1986
96d49225 1987 dev_consume_skb_any(sent_skb);
4d586b82 1988 return num_wrbs;
6b7c5b94
SP
1989}
1990
10ef9ab4
SP
1991/* Return the number of events in the event queue */
1992static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1993{
10ef9ab4
SP
1994 struct be_eq_entry *eqe;
1995 int num = 0;
859b1e4e 1996
10ef9ab4
SP
1997 do {
1998 eqe = queue_tail_node(&eqo->q);
1999 if (eqe->evt == 0)
2000 break;
859b1e4e 2001
10ef9ab4
SP
2002 rmb();
2003 eqe->evt = 0;
2004 num++;
2005 queue_tail_inc(&eqo->q);
2006 } while (true);
2007
2008 return num;
859b1e4e
SP
2009}
2010
10ef9ab4
SP
2011/* Leaves the EQ is disarmed state */
2012static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2013{
10ef9ab4 2014 int num = events_get(eqo);
859b1e4e 2015
10ef9ab4 2016 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2017}
2018
10ef9ab4 2019static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2020{
2021 struct be_rx_page_info *page_info;
3abcdeda
SP
2022 struct be_queue_info *rxq = &rxo->q;
2023 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2024 struct be_rx_compl_info *rxcp;
d23e946c
SP
2025 struct be_adapter *adapter = rxo->adapter;
2026 int flush_wait = 0;
6b7c5b94 2027
d23e946c
SP
2028 /* Consume pending rx completions.
2029 * Wait for the flush completion (identified by zero num_rcvd)
2030 * to arrive. Notify CQ even when there are no more CQ entries
2031 * for HW to flush partially coalesced CQ entries.
2032 * In Lancer, there is no need to wait for flush compl.
2033 */
2034 for (;;) {
2035 rxcp = be_rx_compl_get(rxo);
ddf1169f 2036 if (!rxcp) {
d23e946c
SP
2037 if (lancer_chip(adapter))
2038 break;
2039
2040 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2041 dev_warn(&adapter->pdev->dev,
2042 "did not receive flush compl\n");
2043 break;
2044 }
2045 be_cq_notify(adapter, rx_cq->id, true, 0);
2046 mdelay(1);
2047 } else {
2048 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2049 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2050 if (rxcp->num_rcvd == 0)
2051 break;
2052 }
6b7c5b94
SP
2053 }
2054
d23e946c
SP
2055 /* After cleanup, leave the CQ in unarmed state */
2056 be_cq_notify(adapter, rx_cq->id, false, 0);
2057
2058 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2059 while (atomic_read(&rxq->used) > 0) {
2060 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2061 put_page(page_info->page);
2062 memset(page_info, 0, sizeof(*page_info));
2063 }
2064 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2065 rxq->tail = 0;
2066 rxq->head = 0;
6b7c5b94
SP
2067}
2068
0ae57bb3 2069static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2070{
0ae57bb3
SP
2071 struct be_tx_obj *txo;
2072 struct be_queue_info *txq;
a8e9179a 2073 struct be_eth_tx_compl *txcp;
4d586b82 2074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
0ae57bb3 2077 int i, pending_txqs;
a8e9179a 2078
1a3d0717 2079 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2080 do {
0ae57bb3
SP
2081 pending_txqs = adapter->num_tx_qs;
2082
2083 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2084 cmpl = 0;
2085 num_wrbs = 0;
0ae57bb3
SP
2086 txq = &txo->q;
2087 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2088 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2089 num_wrbs += be_tx_compl_process(adapter, txo,
2090 end_idx);
2091 cmpl++;
2092 }
2093 if (cmpl) {
2094 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2095 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2096 timeo = 0;
0ae57bb3
SP
2097 }
2098 if (atomic_read(&txq->used) == 0)
2099 pending_txqs--;
a8e9179a
SP
2100 }
2101
1a3d0717 2102 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2103 break;
2104
2105 mdelay(1);
2106 } while (true);
2107
0ae57bb3
SP
2108 for_all_tx_queues(adapter, txo, i) {
2109 txq = &txo->q;
2110 if (atomic_read(&txq->used))
2111 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2112 atomic_read(&txq->used));
2113
2114 /* free posted tx for which compls will never arrive */
2115 while (atomic_read(&txq->used)) {
2116 sent_skb = txo->sent_skb_list[txq->tail];
2117 end_idx = txq->tail;
2118 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2119 &dummy_wrb);
2120 index_adv(&end_idx, num_wrbs - 1, txq->len);
2121 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2122 atomic_sub(num_wrbs, &txq->used);
2123 }
b03388d6 2124 }
6b7c5b94
SP
2125}
2126
10ef9ab4
SP
2127static void be_evt_queues_destroy(struct be_adapter *adapter)
2128{
2129 struct be_eq_obj *eqo;
2130 int i;
2131
2132 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2133 if (eqo->q.created) {
2134 be_eq_clean(eqo);
10ef9ab4 2135 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2136 napi_hash_del(&eqo->napi);
68d7bdcb 2137 netif_napi_del(&eqo->napi);
19d59aa7 2138 }
10ef9ab4
SP
2139 be_queue_free(adapter, &eqo->q);
2140 }
2141}
2142
2143static int be_evt_queues_create(struct be_adapter *adapter)
2144{
2145 struct be_queue_info *eq;
2146 struct be_eq_obj *eqo;
2632bafd 2147 struct be_aic_obj *aic;
10ef9ab4
SP
2148 int i, rc;
2149
92bf14ab
SP
2150 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2151 adapter->cfg_num_qs);
10ef9ab4
SP
2152
2153 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2154 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2155 BE_NAPI_WEIGHT);
6384a4d0 2156 napi_hash_add(&eqo->napi);
2632bafd 2157 aic = &adapter->aic_obj[i];
10ef9ab4 2158 eqo->adapter = adapter;
10ef9ab4 2159 eqo->idx = i;
2632bafd
SP
2160 aic->max_eqd = BE_MAX_EQD;
2161 aic->enable = true;
10ef9ab4
SP
2162
2163 eq = &eqo->q;
2164 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2165 sizeof(struct be_eq_entry));
10ef9ab4
SP
2166 if (rc)
2167 return rc;
2168
f2f781a7 2169 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2170 if (rc)
2171 return rc;
2172 }
1cfafab9 2173 return 0;
10ef9ab4
SP
2174}
2175
5fb379ee
SP
2176static void be_mcc_queues_destroy(struct be_adapter *adapter)
2177{
2178 struct be_queue_info *q;
5fb379ee 2179
8788fdc2 2180 q = &adapter->mcc_obj.q;
5fb379ee 2181 if (q->created)
8788fdc2 2182 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2183 be_queue_free(adapter, q);
2184
8788fdc2 2185 q = &adapter->mcc_obj.cq;
5fb379ee 2186 if (q->created)
8788fdc2 2187 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2188 be_queue_free(adapter, q);
2189}
2190
2191/* Must be called only after TX qs are created as MCC shares TX EQ */
2192static int be_mcc_queues_create(struct be_adapter *adapter)
2193{
2194 struct be_queue_info *q, *cq;
5fb379ee 2195
8788fdc2 2196 cq = &adapter->mcc_obj.cq;
5fb379ee 2197 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2198 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2199 goto err;
2200
10ef9ab4
SP
2201 /* Use the default EQ for MCC completions */
2202 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2203 goto mcc_cq_free;
2204
8788fdc2 2205 q = &adapter->mcc_obj.q;
5fb379ee
SP
2206 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2207 goto mcc_cq_destroy;
2208
8788fdc2 2209 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2210 goto mcc_q_free;
2211
2212 return 0;
2213
2214mcc_q_free:
2215 be_queue_free(adapter, q);
2216mcc_cq_destroy:
8788fdc2 2217 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2218mcc_cq_free:
2219 be_queue_free(adapter, cq);
2220err:
2221 return -1;
2222}
2223
6b7c5b94
SP
2224static void be_tx_queues_destroy(struct be_adapter *adapter)
2225{
2226 struct be_queue_info *q;
3c8def97
SP
2227 struct be_tx_obj *txo;
2228 u8 i;
6b7c5b94 2229
3c8def97
SP
2230 for_all_tx_queues(adapter, txo, i) {
2231 q = &txo->q;
2232 if (q->created)
2233 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2234 be_queue_free(adapter, q);
6b7c5b94 2235
3c8def97
SP
2236 q = &txo->cq;
2237 if (q->created)
2238 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2239 be_queue_free(adapter, q);
2240 }
6b7c5b94
SP
2241}
2242
7707133c 2243static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2244{
10ef9ab4 2245 struct be_queue_info *cq, *eq;
3c8def97 2246 struct be_tx_obj *txo;
92bf14ab 2247 int status, i;
6b7c5b94 2248
92bf14ab 2249 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2250
10ef9ab4
SP
2251 for_all_tx_queues(adapter, txo, i) {
2252 cq = &txo->cq;
2253 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2254 sizeof(struct be_eth_tx_compl));
2255 if (status)
2256 return status;
3c8def97 2257
827da44c
JS
2258 u64_stats_init(&txo->stats.sync);
2259 u64_stats_init(&txo->stats.sync_compl);
2260
10ef9ab4
SP
2261 /* If num_evt_qs is less than num_tx_qs, then more than
2262 * one txq share an eq
2263 */
2264 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2265 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2266 if (status)
2267 return status;
6b7c5b94 2268
10ef9ab4
SP
2269 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2270 sizeof(struct be_eth_wrb));
2271 if (status)
2272 return status;
6b7c5b94 2273
94d73aaa 2274 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2275 if (status)
2276 return status;
3c8def97 2277 }
6b7c5b94 2278
d379142b
SP
2279 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2280 adapter->num_tx_qs);
10ef9ab4 2281 return 0;
6b7c5b94
SP
2282}
2283
10ef9ab4 2284static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2285{
2286 struct be_queue_info *q;
3abcdeda
SP
2287 struct be_rx_obj *rxo;
2288 int i;
2289
2290 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2291 q = &rxo->cq;
2292 if (q->created)
2293 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2294 be_queue_free(adapter, q);
ac6a0c4a
SP
2295 }
2296}
2297
10ef9ab4 2298static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2299{
10ef9ab4 2300 struct be_queue_info *eq, *cq;
3abcdeda
SP
2301 struct be_rx_obj *rxo;
2302 int rc, i;
6b7c5b94 2303
92bf14ab
SP
2304 /* We can create as many RSS rings as there are EQs. */
2305 adapter->num_rx_qs = adapter->num_evt_qs;
2306
2307 /* We'll use RSS only if atleast 2 RSS rings are supported.
2308 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2309 */
92bf14ab
SP
2310 if (adapter->num_rx_qs > 1)
2311 adapter->num_rx_qs++;
2312
6b7c5b94 2313 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2314 for_all_rx_queues(adapter, rxo, i) {
2315 rxo->adapter = adapter;
3abcdeda
SP
2316 cq = &rxo->cq;
2317 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2318 sizeof(struct be_eth_rx_compl));
3abcdeda 2319 if (rc)
10ef9ab4 2320 return rc;
3abcdeda 2321
827da44c 2322 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2323 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2324 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2325 if (rc)
10ef9ab4 2326 return rc;
3abcdeda 2327 }
6b7c5b94 2328
d379142b
SP
2329 dev_info(&adapter->pdev->dev,
2330 "created %d RSS queue(s) and 1 default RX queue\n",
2331 adapter->num_rx_qs - 1);
10ef9ab4 2332 return 0;
b628bde2
SP
2333}
2334
6b7c5b94
SP
2335static irqreturn_t be_intx(int irq, void *dev)
2336{
e49cc34f
SP
2337 struct be_eq_obj *eqo = dev;
2338 struct be_adapter *adapter = eqo->adapter;
2339 int num_evts = 0;
6b7c5b94 2340
d0b9cec3
SP
2341 /* IRQ is not expected when NAPI is scheduled as the EQ
2342 * will not be armed.
2343 * But, this can happen on Lancer INTx where it takes
2344 * a while to de-assert INTx or in BE2 where occasionaly
2345 * an interrupt may be raised even when EQ is unarmed.
2346 * If NAPI is already scheduled, then counting & notifying
2347 * events will orphan them.
e49cc34f 2348 */
d0b9cec3 2349 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2350 num_evts = events_get(eqo);
d0b9cec3
SP
2351 __napi_schedule(&eqo->napi);
2352 if (num_evts)
2353 eqo->spurious_intr = 0;
2354 }
2355 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2356
d0b9cec3
SP
2357 /* Return IRQ_HANDLED only for the the first spurious intr
2358 * after a valid intr to stop the kernel from branding
2359 * this irq as a bad one!
e49cc34f 2360 */
d0b9cec3
SP
2361 if (num_evts || eqo->spurious_intr++ == 0)
2362 return IRQ_HANDLED;
2363 else
2364 return IRQ_NONE;
6b7c5b94
SP
2365}
2366
10ef9ab4 2367static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2368{
10ef9ab4 2369 struct be_eq_obj *eqo = dev;
6b7c5b94 2370
0b545a62
SP
2371 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2372 napi_schedule(&eqo->napi);
6b7c5b94
SP
2373 return IRQ_HANDLED;
2374}
2375
2e588f84 2376static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2377{
e38b1706 2378 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2379}
2380
10ef9ab4 2381static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2382 int budget, int polling)
6b7c5b94 2383{
3abcdeda
SP
2384 struct be_adapter *adapter = rxo->adapter;
2385 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2386 struct be_rx_compl_info *rxcp;
6b7c5b94 2387 u32 work_done;
c30d7266 2388 u32 frags_consumed = 0;
6b7c5b94
SP
2389
2390 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2391 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2392 if (!rxcp)
2393 break;
2394
12004ae9
SP
2395 /* Is it a flush compl that has no data */
2396 if (unlikely(rxcp->num_rcvd == 0))
2397 goto loop_continue;
2398
2399 /* Discard compl with partial DMA Lancer B0 */
2400 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2401 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2402 goto loop_continue;
2403 }
2404
2405 /* On BE drop pkts that arrive due to imperfect filtering in
2406 * promiscuous mode on some skews
2407 */
2408 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2409 !lancer_chip(adapter))) {
10ef9ab4 2410 be_rx_compl_discard(rxo, rxcp);
12004ae9 2411 goto loop_continue;
64642811 2412 }
009dd872 2413
6384a4d0
SP
2414 /* Don't do gro when we're busy_polling */
2415 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2416 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2417 else
6384a4d0
SP
2418 be_rx_compl_process(rxo, napi, rxcp);
2419
12004ae9 2420loop_continue:
c30d7266 2421 frags_consumed += rxcp->num_rcvd;
2e588f84 2422 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2423 }
2424
10ef9ab4
SP
2425 if (work_done) {
2426 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2427
6384a4d0
SP
2428 /* When an rx-obj gets into post_starved state, just
2429 * let be_worker do the posting.
2430 */
2431 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2432 !rxo->rx_post_starved)
c30d7266
AK
2433 be_post_rx_frags(rxo, GFP_ATOMIC,
2434 max_t(u32, MAX_RX_POST,
2435 frags_consumed));
6b7c5b94 2436 }
10ef9ab4 2437
6b7c5b94
SP
2438 return work_done;
2439}
2440
512bb8a2
KA
2441static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2442{
2443 switch (status) {
2444 case BE_TX_COMP_HDR_PARSE_ERR:
2445 tx_stats(txo)->tx_hdr_parse_err++;
2446 break;
2447 case BE_TX_COMP_NDMA_ERR:
2448 tx_stats(txo)->tx_dma_err++;
2449 break;
2450 case BE_TX_COMP_ACL_ERR:
2451 tx_stats(txo)->tx_spoof_check_err++;
2452 break;
2453 }
2454}
2455
2456static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2457{
2458 switch (status) {
2459 case LANCER_TX_COMP_LSO_ERR:
2460 tx_stats(txo)->tx_tso_err++;
2461 break;
2462 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2463 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2464 tx_stats(txo)->tx_spoof_check_err++;
2465 break;
2466 case LANCER_TX_COMP_QINQ_ERR:
2467 tx_stats(txo)->tx_qinq_err++;
2468 break;
2469 case LANCER_TX_COMP_PARITY_ERR:
2470 tx_stats(txo)->tx_internal_parity_err++;
2471 break;
2472 case LANCER_TX_COMP_DMA_ERR:
2473 tx_stats(txo)->tx_dma_err++;
2474 break;
2475 }
2476}
2477
c8f64615
SP
2478static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2479 int idx)
6b7c5b94 2480{
6b7c5b94 2481 struct be_eth_tx_compl *txcp;
c8f64615 2482 int num_wrbs = 0, work_done = 0;
512bb8a2 2483 u32 compl_status;
c8f64615
SP
2484 u16 last_idx;
2485
2486 while ((txcp = be_tx_compl_get(&txo->cq))) {
2487 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2488 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2489 work_done++;
3c8def97 2490
512bb8a2
KA
2491 compl_status = GET_TX_COMPL_BITS(status, txcp);
2492 if (compl_status) {
2493 if (lancer_chip(adapter))
2494 lancer_update_tx_err(txo, compl_status);
2495 else
2496 be_update_tx_err(txo, compl_status);
2497 }
10ef9ab4 2498 }
6b7c5b94 2499
10ef9ab4
SP
2500 if (work_done) {
2501 be_cq_notify(adapter, txo->cq.id, true, work_done);
2502 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2503
10ef9ab4
SP
2504 /* As Tx wrbs have been freed up, wake up netdev queue
2505 * if it was stopped due to lack of tx wrbs. */
2506 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2507 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2508 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2509 }
10ef9ab4
SP
2510
2511 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2512 tx_stats(txo)->tx_compl += work_done;
2513 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2514 }
10ef9ab4 2515}
6b7c5b94 2516
68d7bdcb 2517int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2518{
2519 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2520 struct be_adapter *adapter = eqo->adapter;
0b545a62 2521 int max_work = 0, work, i, num_evts;
6384a4d0 2522 struct be_rx_obj *rxo;
a4906ea0 2523 struct be_tx_obj *txo;
f31e50a8 2524
0b545a62
SP
2525 num_evts = events_get(eqo);
2526
a4906ea0
SP
2527 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2528 be_process_tx(adapter, txo, i);
f31e50a8 2529
6384a4d0
SP
2530 if (be_lock_napi(eqo)) {
2531 /* This loop will iterate twice for EQ0 in which
2532 * completions of the last RXQ (default one) are also processed
2533 * For other EQs the loop iterates only once
2534 */
2535 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2536 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2537 max_work = max(work, max_work);
2538 }
2539 be_unlock_napi(eqo);
2540 } else {
2541 max_work = budget;
10ef9ab4 2542 }
6b7c5b94 2543
10ef9ab4
SP
2544 if (is_mcc_eqo(eqo))
2545 be_process_mcc(adapter);
93c86700 2546
10ef9ab4
SP
2547 if (max_work < budget) {
2548 napi_complete(napi);
0b545a62 2549 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2550 } else {
2551 /* As we'll continue in polling mode, count and clear events */
0b545a62 2552 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2553 }
10ef9ab4 2554 return max_work;
6b7c5b94
SP
2555}
2556
6384a4d0
SP
2557#ifdef CONFIG_NET_RX_BUSY_POLL
2558static int be_busy_poll(struct napi_struct *napi)
2559{
2560 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2561 struct be_adapter *adapter = eqo->adapter;
2562 struct be_rx_obj *rxo;
2563 int i, work = 0;
2564
2565 if (!be_lock_busy_poll(eqo))
2566 return LL_FLUSH_BUSY;
2567
2568 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2569 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2570 if (work)
2571 break;
2572 }
2573
2574 be_unlock_busy_poll(eqo);
2575 return work;
2576}
2577#endif
2578
f67ef7ba 2579void be_detect_error(struct be_adapter *adapter)
7c185276 2580{
e1cfb67a
PR
2581 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2582 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2583 u32 i;
eb0eecc1
SK
2584 bool error_detected = false;
2585 struct device *dev = &adapter->pdev->dev;
2586 struct net_device *netdev = adapter->netdev;
7c185276 2587
d23e946c 2588 if (be_hw_error(adapter))
72f02485
SP
2589 return;
2590
e1cfb67a
PR
2591 if (lancer_chip(adapter)) {
2592 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2593 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2594 sliport_err1 = ioread32(adapter->db +
748b539a 2595 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2596 sliport_err2 = ioread32(adapter->db +
748b539a 2597 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2598 adapter->hw_error = true;
2599 /* Do not log error messages if its a FW reset */
2600 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2601 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2602 dev_info(dev, "Firmware update in progress\n");
2603 } else {
2604 error_detected = true;
2605 dev_err(dev, "Error detected in the card\n");
2606 dev_err(dev, "ERR: sliport status 0x%x\n",
2607 sliport_status);
2608 dev_err(dev, "ERR: sliport error1 0x%x\n",
2609 sliport_err1);
2610 dev_err(dev, "ERR: sliport error2 0x%x\n",
2611 sliport_err2);
2612 }
e1cfb67a
PR
2613 }
2614 } else {
2615 pci_read_config_dword(adapter->pdev,
748b539a 2616 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2617 pci_read_config_dword(adapter->pdev,
748b539a 2618 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2619 pci_read_config_dword(adapter->pdev,
748b539a 2620 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2621 pci_read_config_dword(adapter->pdev,
748b539a 2622 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2623
f67ef7ba
PR
2624 ue_lo = (ue_lo & ~ue_lo_mask);
2625 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2626
eb0eecc1
SK
2627 /* On certain platforms BE hardware can indicate spurious UEs.
2628 * Allow HW to stop working completely in case of a real UE.
2629 * Hence not setting the hw_error for UE detection.
2630 */
f67ef7ba 2631
eb0eecc1
SK
2632 if (ue_lo || ue_hi) {
2633 error_detected = true;
2634 dev_err(dev,
2635 "Unrecoverable Error detected in the adapter");
2636 dev_err(dev, "Please reboot server to recover");
2637 if (skyhawk_chip(adapter))
2638 adapter->hw_error = true;
2639 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2640 if (ue_lo & 1)
2641 dev_err(dev, "UE: %s bit set\n",
2642 ue_status_low_desc[i]);
2643 }
2644 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2645 if (ue_hi & 1)
2646 dev_err(dev, "UE: %s bit set\n",
2647 ue_status_hi_desc[i]);
2648 }
7c185276
AK
2649 }
2650 }
eb0eecc1
SK
2651 if (error_detected)
2652 netif_carrier_off(netdev);
7c185276
AK
2653}
2654
8d56ff11
SP
2655static void be_msix_disable(struct be_adapter *adapter)
2656{
ac6a0c4a 2657 if (msix_enabled(adapter)) {
8d56ff11 2658 pci_disable_msix(adapter->pdev);
ac6a0c4a 2659 adapter->num_msix_vec = 0;
68d7bdcb 2660 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2661 }
2662}
2663
c2bba3df 2664static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2665{
7dc4c064 2666 int i, num_vec;
d379142b 2667 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2668
92bf14ab
SP
2669 /* If RoCE is supported, program the max number of NIC vectors that
2670 * may be configured via set-channels, along with vectors needed for
2671 * RoCe. Else, just program the number we'll use initially.
2672 */
2673 if (be_roce_supported(adapter))
2674 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2675 2 * num_online_cpus());
2676 else
2677 num_vec = adapter->cfg_num_qs;
3abcdeda 2678
ac6a0c4a 2679 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2680 adapter->msix_entries[i].entry = i;
2681
7dc4c064
AG
2682 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2683 MIN_MSIX_VECTORS, num_vec);
2684 if (num_vec < 0)
2685 goto fail;
92bf14ab 2686
92bf14ab
SP
2687 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2688 adapter->num_msix_roce_vec = num_vec / 2;
2689 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2690 adapter->num_msix_roce_vec);
2691 }
2692
2693 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2694
2695 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2696 adapter->num_msix_vec);
c2bba3df 2697 return 0;
7dc4c064
AG
2698
2699fail:
2700 dev_warn(dev, "MSIx enable failed\n");
2701
2702 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2703 if (!be_physfn(adapter))
2704 return num_vec;
2705 return 0;
6b7c5b94
SP
2706}
2707
fe6d2a38 2708static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2709 struct be_eq_obj *eqo)
b628bde2 2710{
f2f781a7 2711 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2712}
6b7c5b94 2713
b628bde2
SP
2714static int be_msix_register(struct be_adapter *adapter)
2715{
10ef9ab4
SP
2716 struct net_device *netdev = adapter->netdev;
2717 struct be_eq_obj *eqo;
2718 int status, i, vec;
6b7c5b94 2719
10ef9ab4
SP
2720 for_all_evt_queues(adapter, eqo, i) {
2721 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2722 vec = be_msix_vec_get(adapter, eqo);
2723 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2724 if (status)
2725 goto err_msix;
2726 }
b628bde2 2727
6b7c5b94 2728 return 0;
3abcdeda 2729err_msix:
10ef9ab4
SP
2730 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2731 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2732 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2733 status);
ac6a0c4a 2734 be_msix_disable(adapter);
6b7c5b94
SP
2735 return status;
2736}
2737
2738static int be_irq_register(struct be_adapter *adapter)
2739{
2740 struct net_device *netdev = adapter->netdev;
2741 int status;
2742
ac6a0c4a 2743 if (msix_enabled(adapter)) {
6b7c5b94
SP
2744 status = be_msix_register(adapter);
2745 if (status == 0)
2746 goto done;
ba343c77
SB
2747 /* INTx is not supported for VF */
2748 if (!be_physfn(adapter))
2749 return status;
6b7c5b94
SP
2750 }
2751
e49cc34f 2752 /* INTx: only the first EQ is used */
6b7c5b94
SP
2753 netdev->irq = adapter->pdev->irq;
2754 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2755 &adapter->eq_obj[0]);
6b7c5b94
SP
2756 if (status) {
2757 dev_err(&adapter->pdev->dev,
2758 "INTx request IRQ failed - err %d\n", status);
2759 return status;
2760 }
2761done:
2762 adapter->isr_registered = true;
2763 return 0;
2764}
2765
2766static void be_irq_unregister(struct be_adapter *adapter)
2767{
2768 struct net_device *netdev = adapter->netdev;
10ef9ab4 2769 struct be_eq_obj *eqo;
3abcdeda 2770 int i;
6b7c5b94
SP
2771
2772 if (!adapter->isr_registered)
2773 return;
2774
2775 /* INTx */
ac6a0c4a 2776 if (!msix_enabled(adapter)) {
e49cc34f 2777 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2778 goto done;
2779 }
2780
2781 /* MSIx */
10ef9ab4
SP
2782 for_all_evt_queues(adapter, eqo, i)
2783 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2784
6b7c5b94
SP
2785done:
2786 adapter->isr_registered = false;
6b7c5b94
SP
2787}
2788
10ef9ab4 2789static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2790{
2791 struct be_queue_info *q;
2792 struct be_rx_obj *rxo;
2793 int i;
2794
2795 for_all_rx_queues(adapter, rxo, i) {
2796 q = &rxo->q;
2797 if (q->created) {
2798 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2799 be_rx_cq_clean(rxo);
482c9e79 2800 }
10ef9ab4 2801 be_queue_free(adapter, q);
482c9e79
SP
2802 }
2803}
2804
889cd4b2
SP
2805static int be_close(struct net_device *netdev)
2806{
2807 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2808 struct be_eq_obj *eqo;
2809 int i;
889cd4b2 2810
e1ad8e33
KA
2811 /* This protection is needed as be_close() may be called even when the
2812 * adapter is in cleared state (after eeh perm failure)
2813 */
2814 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2815 return 0;
2816
045508a8
PP
2817 be_roce_dev_close(adapter);
2818
dff345c5
IV
2819 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2820 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2821 napi_disable(&eqo->napi);
6384a4d0
SP
2822 be_disable_busy_poll(eqo);
2823 }
71237b6f 2824 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2825 }
a323d9bf
SP
2826
2827 be_async_mcc_disable(adapter);
2828
2829 /* Wait for all pending tx completions to arrive so that
2830 * all tx skbs are freed.
2831 */
fba87559 2832 netif_tx_disable(netdev);
6e1f9975 2833 be_tx_compl_clean(adapter);
a323d9bf
SP
2834
2835 be_rx_qs_destroy(adapter);
2836
d11a347d
AK
2837 for (i = 1; i < (adapter->uc_macs + 1); i++)
2838 be_cmd_pmac_del(adapter, adapter->if_handle,
2839 adapter->pmac_id[i], 0);
2840 adapter->uc_macs = 0;
2841
a323d9bf 2842 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2843 if (msix_enabled(adapter))
2844 synchronize_irq(be_msix_vec_get(adapter, eqo));
2845 else
2846 synchronize_irq(netdev->irq);
2847 be_eq_clean(eqo);
63fcb27f
PR
2848 }
2849
889cd4b2
SP
2850 be_irq_unregister(adapter);
2851
482c9e79
SP
2852 return 0;
2853}
2854
10ef9ab4 2855static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 2856{
1dcf7b1c
ED
2857 struct rss_info *rss = &adapter->rss_info;
2858 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 2859 struct be_rx_obj *rxo;
e9008ee9 2860 int rc, i, j;
482c9e79
SP
2861
2862 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2863 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2864 sizeof(struct be_eth_rx_d));
2865 if (rc)
2866 return rc;
2867 }
2868
2869 /* The FW would like the default RXQ to be created first */
2870 rxo = default_rxo(adapter);
2871 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2872 adapter->if_handle, false, &rxo->rss_id);
2873 if (rc)
2874 return rc;
2875
2876 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2877 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2878 rx_frag_size, adapter->if_handle,
2879 true, &rxo->rss_id);
482c9e79
SP
2880 if (rc)
2881 return rc;
2882 }
2883
2884 if (be_multi_rxq(adapter)) {
e2557877
VD
2885 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2886 j += adapter->num_rx_qs - 1) {
e9008ee9 2887 for_all_rss_queues(adapter, rxo, i) {
e2557877 2888 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2889 break;
e2557877
VD
2890 rss->rsstable[j + i] = rxo->rss_id;
2891 rss->rss_queue[j + i] = i;
e9008ee9
PR
2892 }
2893 }
e2557877
VD
2894 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2895 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2896
2897 if (!BEx_chip(adapter))
e2557877
VD
2898 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2899 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2900 } else {
2901 /* Disable RSS, if only default RX Q is created */
e2557877 2902 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2903 }
594ad54a 2904
1dcf7b1c 2905 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 2906 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 2907 128, rss_key);
da1388d6 2908 if (rc) {
e2557877 2909 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2910 return rc;
482c9e79
SP
2911 }
2912
1dcf7b1c 2913 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 2914
482c9e79 2915 /* First time posting */
10ef9ab4 2916 for_all_rx_queues(adapter, rxo, i)
c30d7266 2917 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
2918 return 0;
2919}
2920
6b7c5b94
SP
2921static int be_open(struct net_device *netdev)
2922{
2923 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2924 struct be_eq_obj *eqo;
3abcdeda 2925 struct be_rx_obj *rxo;
10ef9ab4 2926 struct be_tx_obj *txo;
b236916a 2927 u8 link_status;
3abcdeda 2928 int status, i;
5fb379ee 2929
10ef9ab4 2930 status = be_rx_qs_create(adapter);
482c9e79
SP
2931 if (status)
2932 goto err;
2933
c2bba3df
SK
2934 status = be_irq_register(adapter);
2935 if (status)
2936 goto err;
5fb379ee 2937
10ef9ab4 2938 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2939 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2940
10ef9ab4
SP
2941 for_all_tx_queues(adapter, txo, i)
2942 be_cq_notify(adapter, txo->cq.id, true, 0);
2943
7a1e9b20
SP
2944 be_async_mcc_enable(adapter);
2945
10ef9ab4
SP
2946 for_all_evt_queues(adapter, eqo, i) {
2947 napi_enable(&eqo->napi);
6384a4d0 2948 be_enable_busy_poll(eqo);
4cad9f3b 2949 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2950 }
04d3d624 2951 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2952
323ff71e 2953 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2954 if (!status)
2955 be_link_status_update(adapter, link_status);
2956
fba87559 2957 netif_tx_start_all_queues(netdev);
045508a8 2958 be_roce_dev_open(adapter);
c9c47142 2959
c5abe7c0 2960#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2961 if (skyhawk_chip(adapter))
2962 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2963#endif
2964
889cd4b2
SP
2965 return 0;
2966err:
2967 be_close(adapter->netdev);
2968 return -EIO;
5fb379ee
SP
2969}
2970
71d8d1b5
AK
2971static int be_setup_wol(struct be_adapter *adapter, bool enable)
2972{
2973 struct be_dma_mem cmd;
2974 int status = 0;
2975 u8 mac[ETH_ALEN];
2976
2977 memset(mac, 0, ETH_ALEN);
2978
2979 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2980 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2981 GFP_KERNEL);
ddf1169f 2982 if (!cmd.va)
6b568689 2983 return -ENOMEM;
71d8d1b5
AK
2984
2985 if (enable) {
2986 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2987 PCICFG_PM_CONTROL_OFFSET,
2988 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2989 if (status) {
2990 dev_err(&adapter->pdev->dev,
2381a55c 2991 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2992 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2993 cmd.dma);
71d8d1b5
AK
2994 return status;
2995 }
2996 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2997 adapter->netdev->dev_addr,
2998 &cmd);
71d8d1b5
AK
2999 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3000 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3001 } else {
3002 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3003 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3004 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3005 }
3006
2b7bcebf 3007 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3008 return status;
3009}
3010
6d87f5c3
AK
3011/*
3012 * Generate a seed MAC address from the PF MAC Address using jhash.
3013 * MAC Address for VFs are assigned incrementally starting from the seed.
3014 * These addresses are programmed in the ASIC by the PF and the VF driver
3015 * queries for the MAC address during its probe.
3016 */
4c876616 3017static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3018{
f9449ab7 3019 u32 vf;
3abcdeda 3020 int status = 0;
6d87f5c3 3021 u8 mac[ETH_ALEN];
11ac75ed 3022 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3023
3024 be_vf_eth_addr_generate(adapter, mac);
3025
11ac75ed 3026 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3027 if (BEx_chip(adapter))
590c391d 3028 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3029 vf_cfg->if_handle,
3030 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3031 else
3032 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3033 vf + 1);
590c391d 3034
6d87f5c3
AK
3035 if (status)
3036 dev_err(&adapter->pdev->dev,
748b539a
SP
3037 "Mac address assignment failed for VF %d\n",
3038 vf);
6d87f5c3 3039 else
11ac75ed 3040 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3041
3042 mac[5] += 1;
3043 }
3044 return status;
3045}
3046
4c876616
SP
3047static int be_vfs_mac_query(struct be_adapter *adapter)
3048{
3049 int status, vf;
3050 u8 mac[ETH_ALEN];
3051 struct be_vf_cfg *vf_cfg;
4c876616
SP
3052
3053 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3054 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3055 mac, vf_cfg->if_handle,
3056 false, vf+1);
4c876616
SP
3057 if (status)
3058 return status;
3059 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3060 }
3061 return 0;
3062}
3063
f9449ab7 3064static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3065{
11ac75ed 3066 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3067 u32 vf;
3068
257a3feb 3069 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3070 dev_warn(&adapter->pdev->dev,
3071 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3072 goto done;
3073 }
3074
b4c1df93
SP
3075 pci_disable_sriov(adapter->pdev);
3076
11ac75ed 3077 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3078 if (BEx_chip(adapter))
11ac75ed
SP
3079 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3080 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3081 else
3082 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3083 vf + 1);
f9449ab7 3084
11ac75ed
SP
3085 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3086 }
39f1d94d
SP
3087done:
3088 kfree(adapter->vf_cfg);
3089 adapter->num_vfs = 0;
f174c7ec 3090 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3091}
3092
7707133c
SP
3093static void be_clear_queues(struct be_adapter *adapter)
3094{
3095 be_mcc_queues_destroy(adapter);
3096 be_rx_cqs_destroy(adapter);
3097 be_tx_queues_destroy(adapter);
3098 be_evt_queues_destroy(adapter);
3099}
3100
68d7bdcb 3101static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3102{
191eb756
SP
3103 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3104 cancel_delayed_work_sync(&adapter->work);
3105 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3106 }
68d7bdcb
SP
3107}
3108
b05004ad 3109static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3110{
3111 int i;
3112
b05004ad
SK
3113 if (adapter->pmac_id) {
3114 for (i = 0; i < (adapter->uc_macs + 1); i++)
3115 be_cmd_pmac_del(adapter, adapter->if_handle,
3116 adapter->pmac_id[i], 0);
3117 adapter->uc_macs = 0;
3118
3119 kfree(adapter->pmac_id);
3120 adapter->pmac_id = NULL;
3121 }
3122}
3123
c5abe7c0 3124#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3125static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3126{
630f4b70
SB
3127 struct net_device *netdev = adapter->netdev;
3128
c9c47142
SP
3129 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3130 be_cmd_manage_iface(adapter, adapter->if_handle,
3131 OP_CONVERT_TUNNEL_TO_NORMAL);
3132
3133 if (adapter->vxlan_port)
3134 be_cmd_set_vxlan_port(adapter, 0);
3135
3136 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3137 adapter->vxlan_port = 0;
630f4b70
SB
3138
3139 netdev->hw_enc_features = 0;
3140 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3141}
c5abe7c0 3142#endif
c9c47142 3143
b05004ad
SK
3144static int be_clear(struct be_adapter *adapter)
3145{
68d7bdcb 3146 be_cancel_worker(adapter);
191eb756 3147
11ac75ed 3148 if (sriov_enabled(adapter))
f9449ab7
SP
3149 be_vf_clear(adapter);
3150
bec84e6b
VV
3151 /* Re-configure FW to distribute resources evenly across max-supported
3152 * number of VFs, only when VFs are not already enabled.
3153 */
3154 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3155 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3156 pci_sriov_get_totalvfs(adapter->pdev));
3157
c5abe7c0 3158#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3159 be_disable_vxlan_offloads(adapter);
c5abe7c0 3160#endif
2d17f403 3161 /* delete the primary mac along with the uc-mac list */
b05004ad 3162 be_mac_clear(adapter);
fbc13f01 3163
f9449ab7 3164 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3165
7707133c 3166 be_clear_queues(adapter);
a54769f5 3167
10ef9ab4 3168 be_msix_disable(adapter);
e1ad8e33 3169 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3170 return 0;
3171}
3172
4c876616 3173static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3174{
92bf14ab 3175 struct be_resources res = {0};
4c876616
SP
3176 struct be_vf_cfg *vf_cfg;
3177 u32 cap_flags, en_flags, vf;
922bbe88 3178 int status = 0;
abb93951 3179
4c876616
SP
3180 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3181 BE_IF_FLAGS_MULTICAST;
abb93951 3182
4c876616 3183 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3184 if (!BE3_chip(adapter)) {
3185 status = be_cmd_get_profile_config(adapter, &res,
3186 vf + 1);
3187 if (!status)
3188 cap_flags = res.if_cap_flags;
3189 }
4c876616
SP
3190
3191 /* If a FW profile exists, then cap_flags are updated */
3192 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3193 BE_IF_FLAGS_BROADCAST |
3194 BE_IF_FLAGS_MULTICAST);
3195 status =
3196 be_cmd_if_create(adapter, cap_flags, en_flags,
3197 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3198 if (status)
3199 goto err;
3200 }
3201err:
3202 return status;
abb93951
PR
3203}
3204
39f1d94d 3205static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3206{
11ac75ed 3207 struct be_vf_cfg *vf_cfg;
30128031
SP
3208 int vf;
3209
39f1d94d
SP
3210 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3211 GFP_KERNEL);
3212 if (!adapter->vf_cfg)
3213 return -ENOMEM;
3214
11ac75ed
SP
3215 for_all_vfs(adapter, vf_cfg, vf) {
3216 vf_cfg->if_handle = -1;
3217 vf_cfg->pmac_id = -1;
30128031 3218 }
39f1d94d 3219 return 0;
30128031
SP
3220}
3221
f9449ab7
SP
3222static int be_vf_setup(struct be_adapter *adapter)
3223{
c502224e 3224 struct device *dev = &adapter->pdev->dev;
11ac75ed 3225 struct be_vf_cfg *vf_cfg;
4c876616 3226 int status, old_vfs, vf;
04a06028 3227 u32 privileges;
39f1d94d 3228
257a3feb 3229 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3230
3231 status = be_vf_setup_init(adapter);
3232 if (status)
3233 goto err;
30128031 3234
4c876616
SP
3235 if (old_vfs) {
3236 for_all_vfs(adapter, vf_cfg, vf) {
3237 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3238 if (status)
3239 goto err;
3240 }
f9449ab7 3241
4c876616
SP
3242 status = be_vfs_mac_query(adapter);
3243 if (status)
3244 goto err;
3245 } else {
bec84e6b
VV
3246 status = be_vfs_if_create(adapter);
3247 if (status)
3248 goto err;
3249
39f1d94d
SP
3250 status = be_vf_eth_addr_config(adapter);
3251 if (status)
3252 goto err;
3253 }
f9449ab7 3254
11ac75ed 3255 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3256 /* Allow VFs to programs MAC/VLAN filters */
3257 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3258 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3259 status = be_cmd_set_fn_privileges(adapter,
3260 privileges |
3261 BE_PRIV_FILTMGMT,
3262 vf + 1);
3263 if (!status)
3264 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3265 vf);
3266 }
3267
0f77ba73
RN
3268 /* Allow full available bandwidth */
3269 if (!old_vfs)
3270 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3271
bdce2ad7 3272 if (!old_vfs) {
0599863d 3273 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3274 be_cmd_set_logical_link_config(adapter,
3275 IFLA_VF_LINK_STATE_AUTO,
3276 vf+1);
3277 }
f9449ab7 3278 }
b4c1df93
SP
3279
3280 if (!old_vfs) {
3281 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3282 if (status) {
3283 dev_err(dev, "SRIOV enable failed\n");
3284 adapter->num_vfs = 0;
3285 goto err;
3286 }
3287 }
f174c7ec
VV
3288
3289 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3290 return 0;
3291err:
4c876616
SP
3292 dev_err(dev, "VF setup failed\n");
3293 be_vf_clear(adapter);
f9449ab7
SP
3294 return status;
3295}
3296
f93f160b
VV
3297/* Converting function_mode bits on BE3 to SH mc_type enums */
3298
3299static u8 be_convert_mc_type(u32 function_mode)
3300{
66064dbc 3301 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3302 return vNIC1;
66064dbc 3303 else if (function_mode & QNQ_MODE)
f93f160b
VV
3304 return FLEX10;
3305 else if (function_mode & VNIC_MODE)
3306 return vNIC2;
3307 else if (function_mode & UMC_ENABLED)
3308 return UMC;
3309 else
3310 return MC_NONE;
3311}
3312
92bf14ab
SP
3313/* On BE2/BE3 FW does not suggest the supported limits */
3314static void BEx_get_resources(struct be_adapter *adapter,
3315 struct be_resources *res)
3316{
bec84e6b 3317 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3318
3319 if (be_physfn(adapter))
3320 res->max_uc_mac = BE_UC_PMAC_COUNT;
3321 else
3322 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3323
f93f160b
VV
3324 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3325
3326 if (be_is_mc(adapter)) {
3327 /* Assuming that there are 4 channels per port,
3328 * when multi-channel is enabled
3329 */
3330 if (be_is_qnq_mode(adapter))
3331 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3332 else
3333 /* In a non-qnq multichannel mode, the pvid
3334 * takes up one vlan entry
3335 */
3336 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3337 } else {
92bf14ab 3338 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3339 }
3340
92bf14ab
SP
3341 res->max_mcast_mac = BE_MAX_MC;
3342
a5243dab
VV
3343 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3344 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3345 * *only* if it is RSS-capable.
3346 */
3347 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3348 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3349 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3350 res->max_tx_qs = 1;
a28277dc
SR
3351 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3352 struct be_resources super_nic_res = {0};
3353
3354 /* On a SuperNIC profile, the driver needs to use the
3355 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3356 */
3357 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3358 /* Some old versions of BE3 FW don't report max_tx_qs value */
3359 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3360 } else {
92bf14ab 3361 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3362 }
92bf14ab
SP
3363
3364 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3365 !use_sriov && be_physfn(adapter))
3366 res->max_rss_qs = (adapter->be3_native) ?
3367 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3368 res->max_rx_qs = res->max_rss_qs + 1;
3369
e3dc867c 3370 if (be_physfn(adapter))
d3518e21 3371 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3372 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3373 else
3374 res->max_evt_qs = 1;
92bf14ab
SP
3375
3376 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3377 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3378 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3379}
3380
30128031
SP
3381static void be_setup_init(struct be_adapter *adapter)
3382{
3383 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3384 adapter->phy.link_speed = -1;
30128031
SP
3385 adapter->if_handle = -1;
3386 adapter->be3_native = false;
3387 adapter->promiscuous = false;
f25b119c
PR
3388 if (be_physfn(adapter))
3389 adapter->cmd_privileges = MAX_PRIVILEGES;
3390 else
3391 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3392}
3393
bec84e6b
VV
3394static int be_get_sriov_config(struct be_adapter *adapter)
3395{
3396 struct device *dev = &adapter->pdev->dev;
3397 struct be_resources res = {0};
d3d18312 3398 int max_vfs, old_vfs;
bec84e6b
VV
3399
3400 /* Some old versions of BE3 FW don't report max_vfs value */
d3d18312
SP
3401 be_cmd_get_profile_config(adapter, &res, 0);
3402
bec84e6b
VV
3403 if (BE3_chip(adapter) && !res.max_vfs) {
3404 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3405 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3406 }
3407
d3d18312 3408 adapter->pool_res = res;
bec84e6b
VV
3409
3410 if (!be_max_vfs(adapter)) {
3411 if (num_vfs)
50762667 3412 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
bec84e6b
VV
3413 adapter->num_vfs = 0;
3414 return 0;
3415 }
3416
d3d18312
SP
3417 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3418
bec84e6b
VV
3419 /* validate num_vfs module param */
3420 old_vfs = pci_num_vf(adapter->pdev);
3421 if (old_vfs) {
3422 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3423 if (old_vfs != num_vfs)
3424 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3425 adapter->num_vfs = old_vfs;
3426 } else {
3427 if (num_vfs > be_max_vfs(adapter)) {
3428 dev_info(dev, "Resources unavailable to init %d VFs\n",
3429 num_vfs);
3430 dev_info(dev, "Limiting to %d VFs\n",
3431 be_max_vfs(adapter));
3432 }
3433 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3434 }
3435
3436 return 0;
3437}
3438
92bf14ab 3439static int be_get_resources(struct be_adapter *adapter)
abb93951 3440{
92bf14ab
SP
3441 struct device *dev = &adapter->pdev->dev;
3442 struct be_resources res = {0};
3443 int status;
abb93951 3444
92bf14ab
SP
3445 if (BEx_chip(adapter)) {
3446 BEx_get_resources(adapter, &res);
3447 adapter->res = res;
abb93951
PR
3448 }
3449
92bf14ab
SP
3450 /* For Lancer, SH etc read per-function resource limits from FW.
3451 * GET_FUNC_CONFIG returns per function guaranteed limits.
3452 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3453 */
3454 if (!BEx_chip(adapter)) {
3455 status = be_cmd_get_func_config(adapter, &res);
3456 if (status)
3457 return status;
abb93951 3458
92bf14ab
SP
3459 /* If RoCE may be enabled stash away half the EQs for RoCE */
3460 if (be_roce_supported(adapter))
3461 res.max_evt_qs /= 2;
3462 adapter->res = res;
abb93951 3463 }
4c876616 3464
acbafeb1
SP
3465 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3466 be_max_txqs(adapter), be_max_rxqs(adapter),
3467 be_max_rss(adapter), be_max_eqs(adapter),
3468 be_max_vfs(adapter));
3469 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3470 be_max_uc(adapter), be_max_mc(adapter),
3471 be_max_vlans(adapter));
3472
92bf14ab 3473 return 0;
abb93951
PR
3474}
3475
d3d18312
SP
3476static void be_sriov_config(struct be_adapter *adapter)
3477{
3478 struct device *dev = &adapter->pdev->dev;
3479 int status;
3480
3481 status = be_get_sriov_config(adapter);
3482 if (status) {
3483 dev_err(dev, "Failed to query SR-IOV configuration\n");
3484 dev_err(dev, "SR-IOV cannot be enabled\n");
3485 return;
3486 }
3487
3488 /* When the HW is in SRIOV capable configuration, the PF-pool
3489 * resources are equally distributed across the max-number of
3490 * VFs. The user may request only a subset of the max-vfs to be
3491 * enabled. Based on num_vfs, redistribute the resources across
3492 * num_vfs so that each VF will have access to more number of
3493 * resources. This facility is not available in BE3 FW.
3494 * Also, this is done by FW in Lancer chip.
3495 */
3496 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3497 status = be_cmd_set_sriov_config(adapter,
3498 adapter->pool_res,
3499 adapter->num_vfs);
3500 if (status)
3501 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3502 }
3503}
3504
39f1d94d
SP
3505static int be_get_config(struct be_adapter *adapter)
3506{
542963b7 3507 u16 profile_id;
4c876616 3508 int status;
39f1d94d 3509
e97e3cda 3510 status = be_cmd_query_fw_cfg(adapter);
abb93951 3511 if (status)
92bf14ab 3512 return status;
abb93951 3513
542963b7
VV
3514 if (be_physfn(adapter)) {
3515 status = be_cmd_get_active_profile(adapter, &profile_id);
3516 if (!status)
3517 dev_info(&adapter->pdev->dev,
3518 "Using profile 0x%x\n", profile_id);
962bcb75 3519 }
bec84e6b 3520
d3d18312
SP
3521 if (!BE2_chip(adapter) && be_physfn(adapter))
3522 be_sriov_config(adapter);
542963b7 3523
92bf14ab
SP
3524 status = be_get_resources(adapter);
3525 if (status)
3526 return status;
abb93951 3527
46ee9c14
RN
3528 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3529 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3530 if (!adapter->pmac_id)
3531 return -ENOMEM;
abb93951 3532
92bf14ab
SP
3533 /* Sanitize cfg_num_qs based on HW and platform limits */
3534 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3535
3536 return 0;
39f1d94d
SP
3537}
3538
95046b92
SP
3539static int be_mac_setup(struct be_adapter *adapter)
3540{
3541 u8 mac[ETH_ALEN];
3542 int status;
3543
3544 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3545 status = be_cmd_get_perm_mac(adapter, mac);
3546 if (status)
3547 return status;
3548
3549 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3550 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3551 } else {
3552 /* Maybe the HW was reset; dev_addr must be re-programmed */
3553 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3554 }
3555
2c7a9dc1
AK
3556 /* For BE3-R VFs, the PF programs the initial MAC address */
3557 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3558 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3559 &adapter->pmac_id[0], 0);
95046b92
SP
3560 return 0;
3561}
3562
68d7bdcb
SP
3563static void be_schedule_worker(struct be_adapter *adapter)
3564{
3565 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3566 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3567}
3568
7707133c 3569static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3570{
68d7bdcb 3571 struct net_device *netdev = adapter->netdev;
10ef9ab4 3572 int status;
ba343c77 3573
7707133c 3574 status = be_evt_queues_create(adapter);
abb93951
PR
3575 if (status)
3576 goto err;
73d540f2 3577
7707133c 3578 status = be_tx_qs_create(adapter);
c2bba3df
SK
3579 if (status)
3580 goto err;
10ef9ab4 3581
7707133c 3582 status = be_rx_cqs_create(adapter);
10ef9ab4 3583 if (status)
a54769f5 3584 goto err;
6b7c5b94 3585
7707133c 3586 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3587 if (status)
3588 goto err;
3589
68d7bdcb
SP
3590 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3591 if (status)
3592 goto err;
3593
3594 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3595 if (status)
3596 goto err;
3597
7707133c
SP
3598 return 0;
3599err:
3600 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3601 return status;
3602}
3603
68d7bdcb
SP
3604int be_update_queues(struct be_adapter *adapter)
3605{
3606 struct net_device *netdev = adapter->netdev;
3607 int status;
3608
3609 if (netif_running(netdev))
3610 be_close(netdev);
3611
3612 be_cancel_worker(adapter);
3613
3614 /* If any vectors have been shared with RoCE we cannot re-program
3615 * the MSIx table.
3616 */
3617 if (!adapter->num_msix_roce_vec)
3618 be_msix_disable(adapter);
3619
3620 be_clear_queues(adapter);
3621
3622 if (!msix_enabled(adapter)) {
3623 status = be_msix_enable(adapter);
3624 if (status)
3625 return status;
3626 }
3627
3628 status = be_setup_queues(adapter);
3629 if (status)
3630 return status;
3631
3632 be_schedule_worker(adapter);
3633
3634 if (netif_running(netdev))
3635 status = be_open(netdev);
3636
3637 return status;
3638}
3639
7707133c
SP
3640static int be_setup(struct be_adapter *adapter)
3641{
3642 struct device *dev = &adapter->pdev->dev;
3643 u32 tx_fc, rx_fc, en_flags;
3644 int status;
3645
3646 be_setup_init(adapter);
3647
3648 if (!lancer_chip(adapter))
3649 be_cmd_req_native_mode(adapter);
3650
3651 status = be_get_config(adapter);
10ef9ab4 3652 if (status)
a54769f5 3653 goto err;
6b7c5b94 3654
7707133c 3655 status = be_msix_enable(adapter);
10ef9ab4 3656 if (status)
a54769f5 3657 goto err;
6b7c5b94 3658
f9449ab7 3659 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3660 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3661 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3662 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3663 en_flags = en_flags & be_if_cap_flags(adapter);
3664 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3665 &adapter->if_handle, 0);
7707133c 3666 if (status)
a54769f5 3667 goto err;
6b7c5b94 3668
68d7bdcb
SP
3669 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3670 rtnl_lock();
7707133c 3671 status = be_setup_queues(adapter);
68d7bdcb 3672 rtnl_unlock();
95046b92 3673 if (status)
1578e777
PR
3674 goto err;
3675
7707133c 3676 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3677
3678 status = be_mac_setup(adapter);
10ef9ab4
SP
3679 if (status)
3680 goto err;
3681
e97e3cda 3682 be_cmd_get_fw_ver(adapter);
acbafeb1 3683 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 3684
e9e2a904 3685 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 3686 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
3687 adapter->fw_ver);
3688 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3689 }
3690
1d1e9a46 3691 if (adapter->vlans_added)
10329df8 3692 be_vid_config(adapter);
7ab8b0b4 3693
a54769f5 3694 be_set_rx_mode(adapter->netdev);
5fb379ee 3695
76a9e08e
SR
3696 be_cmd_get_acpi_wol_cap(adapter);
3697
ddc3f5cb 3698 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3699
ddc3f5cb
AK
3700 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3701 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3702 adapter->rx_fc);
2dc1deb6 3703
bdce2ad7
SR
3704 if (be_physfn(adapter))
3705 be_cmd_set_logical_link_config(adapter,
3706 IFLA_VF_LINK_STATE_AUTO, 0);
3707
bec84e6b
VV
3708 if (adapter->num_vfs)
3709 be_vf_setup(adapter);
f9449ab7 3710
f25b119c
PR
3711 status = be_cmd_get_phy_info(adapter);
3712 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3713 adapter->phy.fc_autoneg = 1;
3714
68d7bdcb 3715 be_schedule_worker(adapter);
e1ad8e33 3716 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3717 return 0;
a54769f5
SP
3718err:
3719 be_clear(adapter);
3720 return status;
3721}
6b7c5b94 3722
66268739
IV
3723#ifdef CONFIG_NET_POLL_CONTROLLER
3724static void be_netpoll(struct net_device *netdev)
3725{
3726 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3727 struct be_eq_obj *eqo;
66268739
IV
3728 int i;
3729
e49cc34f
SP
3730 for_all_evt_queues(adapter, eqo, i) {
3731 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3732 napi_schedule(&eqo->napi);
3733 }
66268739
IV
3734}
3735#endif
3736
96c9b2e4 3737static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3738
306f1348
SP
3739static bool phy_flashing_required(struct be_adapter *adapter)
3740{
42f11cf2
AK
3741 return (adapter->phy.phy_type == TN_8022 &&
3742 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3743}
3744
c165541e
PR
3745static bool is_comp_in_ufi(struct be_adapter *adapter,
3746 struct flash_section_info *fsec, int type)
3747{
3748 int i = 0, img_type = 0;
3749 struct flash_section_info_g2 *fsec_g2 = NULL;
3750
ca34fe38 3751 if (BE2_chip(adapter))
c165541e
PR
3752 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3753
3754 for (i = 0; i < MAX_FLASH_COMP; i++) {
3755 if (fsec_g2)
3756 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3757 else
3758 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3759
3760 if (img_type == type)
3761 return true;
3762 }
3763 return false;
3764
3765}
3766
4188e7df 3767static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3768 int header_size,
3769 const struct firmware *fw)
c165541e
PR
3770{
3771 struct flash_section_info *fsec = NULL;
3772 const u8 *p = fw->data;
3773
3774 p += header_size;
3775 while (p < (fw->data + fw->size)) {
3776 fsec = (struct flash_section_info *)p;
3777 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3778 return fsec;
3779 p += 32;
3780 }
3781 return NULL;
3782}
3783
96c9b2e4
VV
3784static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3785 u32 img_offset, u32 img_size, int hdr_size,
3786 u16 img_optype, bool *crc_match)
3787{
3788 u32 crc_offset;
3789 int status;
3790 u8 crc[4];
3791
3792 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3793 if (status)
3794 return status;
3795
3796 crc_offset = hdr_size + img_offset + img_size - 4;
3797
3798 /* Skip flashing, if crc of flashed region matches */
3799 if (!memcmp(crc, p + crc_offset, 4))
3800 *crc_match = true;
3801 else
3802 *crc_match = false;
3803
3804 return status;
3805}
3806
773a2d7c 3807static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3808 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3809{
773a2d7c 3810 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4
VV
3811 u32 total_bytes, flash_op, num_bytes;
3812 int status;
773a2d7c
PR
3813
3814 total_bytes = img_size;
3815 while (total_bytes) {
3816 num_bytes = min_t(u32, 32*1024, total_bytes);
3817
3818 total_bytes -= num_bytes;
3819
3820 if (!total_bytes) {
3821 if (optype == OPTYPE_PHY_FW)
3822 flash_op = FLASHROM_OPER_PHY_FLASH;
3823 else
3824 flash_op = FLASHROM_OPER_FLASH;
3825 } else {
3826 if (optype == OPTYPE_PHY_FW)
3827 flash_op = FLASHROM_OPER_PHY_SAVE;
3828 else
3829 flash_op = FLASHROM_OPER_SAVE;
3830 }
3831
be716446 3832 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3833 img += num_bytes;
3834 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3835 flash_op, num_bytes);
4c60005f 3836 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
3837 optype == OPTYPE_PHY_FW)
3838 break;
3839 else if (status)
773a2d7c 3840 return status;
773a2d7c
PR
3841 }
3842 return 0;
3843}
3844
0ad3157e 3845/* For BE2, BE3 and BE3-R */
ca34fe38 3846static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
3847 const struct firmware *fw,
3848 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 3849{
c165541e 3850 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 3851 struct device *dev = &adapter->pdev->dev;
c165541e 3852 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3853 int status, i, filehdr_size, num_comp;
3854 const struct flash_comp *pflashcomp;
3855 bool crc_match;
3856 const u8 *p;
c165541e
PR
3857
3858 struct flash_comp gen3_flash_types[] = {
3859 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3860 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3861 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3862 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3863 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3864 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3865 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3866 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3867 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3868 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3869 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3870 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3871 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3872 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3873 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3874 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3875 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3876 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3877 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3878 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3879 };
c165541e
PR
3880
3881 struct flash_comp gen2_flash_types[] = {
3882 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3883 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3884 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3885 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3886 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3887 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3888 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3889 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3890 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3891 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3892 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3893 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3894 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3895 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3896 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3897 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3898 };
3899
ca34fe38 3900 if (BE3_chip(adapter)) {
3f0d4560
AK
3901 pflashcomp = gen3_flash_types;
3902 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3903 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3904 } else {
3905 pflashcomp = gen2_flash_types;
3906 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3907 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3908 }
ca34fe38 3909
c165541e
PR
3910 /* Get flash section info*/
3911 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3912 if (!fsec) {
96c9b2e4 3913 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
3914 return -1;
3915 }
9fe96934 3916 for (i = 0; i < num_comp; i++) {
c165541e 3917 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3918 continue;
c165541e
PR
3919
3920 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3921 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3922 continue;
3923
773a2d7c
PR
3924 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3925 !phy_flashing_required(adapter))
306f1348 3926 continue;
c165541e 3927
773a2d7c 3928 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
3929 status = be_check_flash_crc(adapter, fw->data,
3930 pflashcomp[i].offset,
3931 pflashcomp[i].size,
3932 filehdr_size +
3933 img_hdrs_size,
3934 OPTYPE_REDBOOT, &crc_match);
3935 if (status) {
3936 dev_err(dev,
3937 "Could not get CRC for 0x%x region\n",
3938 pflashcomp[i].optype);
3939 continue;
3940 }
3941
3942 if (crc_match)
773a2d7c
PR
3943 continue;
3944 }
c165541e 3945
96c9b2e4
VV
3946 p = fw->data + filehdr_size + pflashcomp[i].offset +
3947 img_hdrs_size;
306f1348
SP
3948 if (p + pflashcomp[i].size > fw->data + fw->size)
3949 return -1;
773a2d7c
PR
3950
3951 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
748b539a 3952 pflashcomp[i].size);
773a2d7c 3953 if (status) {
96c9b2e4 3954 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
3955 pflashcomp[i].img_type);
3956 return status;
84517482 3957 }
84517482 3958 }
84517482
AK
3959 return 0;
3960}
3961
96c9b2e4
VV
3962static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3963{
3964 u32 img_type = le32_to_cpu(fsec_entry.type);
3965 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3966
3967 if (img_optype != 0xFFFF)
3968 return img_optype;
3969
3970 switch (img_type) {
3971 case IMAGE_FIRMWARE_iSCSI:
3972 img_optype = OPTYPE_ISCSI_ACTIVE;
3973 break;
3974 case IMAGE_BOOT_CODE:
3975 img_optype = OPTYPE_REDBOOT;
3976 break;
3977 case IMAGE_OPTION_ROM_ISCSI:
3978 img_optype = OPTYPE_BIOS;
3979 break;
3980 case IMAGE_OPTION_ROM_PXE:
3981 img_optype = OPTYPE_PXE_BIOS;
3982 break;
3983 case IMAGE_OPTION_ROM_FCoE:
3984 img_optype = OPTYPE_FCOE_BIOS;
3985 break;
3986 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3987 img_optype = OPTYPE_ISCSI_BACKUP;
3988 break;
3989 case IMAGE_NCSI:
3990 img_optype = OPTYPE_NCSI_FW;
3991 break;
3992 case IMAGE_FLASHISM_JUMPVECTOR:
3993 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3994 break;
3995 case IMAGE_FIRMWARE_PHY:
3996 img_optype = OPTYPE_SH_PHY_FW;
3997 break;
3998 case IMAGE_REDBOOT_DIR:
3999 img_optype = OPTYPE_REDBOOT_DIR;
4000 break;
4001 case IMAGE_REDBOOT_CONFIG:
4002 img_optype = OPTYPE_REDBOOT_CONFIG;
4003 break;
4004 case IMAGE_UFI_DIR:
4005 img_optype = OPTYPE_UFI_DIR;
4006 break;
4007 default:
4008 break;
4009 }
4010
4011 return img_optype;
4012}
4013
773a2d7c 4014static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4015 const struct firmware *fw,
4016 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4017{
773a2d7c 4018 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
96c9b2e4 4019 struct device *dev = &adapter->pdev->dev;
773a2d7c 4020 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4021 u32 img_offset, img_size, img_type;
4022 int status, i, filehdr_size;
4023 bool crc_match, old_fw_img;
4024 u16 img_optype;
4025 const u8 *p;
773a2d7c
PR
4026
4027 filehdr_size = sizeof(struct flash_file_hdr_g3);
4028 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4029 if (!fsec) {
96c9b2e4 4030 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4031 return -EINVAL;
773a2d7c
PR
4032 }
4033
4034 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4035 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4036 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4037 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4038 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4039 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4040
96c9b2e4 4041 if (img_optype == 0xFFFF)
773a2d7c 4042 continue;
96c9b2e4
VV
4043 /* Don't bother verifying CRC if an old FW image is being
4044 * flashed
4045 */
4046 if (old_fw_img)
4047 goto flash;
4048
4049 status = be_check_flash_crc(adapter, fw->data, img_offset,
4050 img_size, filehdr_size +
4051 img_hdrs_size, img_optype,
4052 &crc_match);
4053 /* The current FW image on the card does not recognize the new
4054 * FLASH op_type. The FW download is partially complete.
4055 * Reboot the server now to enable FW image to recognize the
4056 * new FLASH op_type. To complete the remaining process,
4057 * download the same FW again after the reboot.
4058 */
4c60005f
KA
4059 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4060 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
96c9b2e4
VV
4061 dev_err(dev, "Flash incomplete. Reset the server\n");
4062 dev_err(dev, "Download FW image again after reset\n");
4063 return -EAGAIN;
4064 } else if (status) {
4065 dev_err(dev, "Could not get CRC for 0x%x region\n",
4066 img_optype);
4067 return -EFAULT;
773a2d7c
PR
4068 }
4069
96c9b2e4
VV
4070 if (crc_match)
4071 continue;
773a2d7c 4072
96c9b2e4
VV
4073flash:
4074 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4075 if (p + img_size > fw->data + fw->size)
4076 return -1;
4077
4078 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
96c9b2e4
VV
4079 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4080 * UFI_DIR region
4081 */
4c60005f
KA
4082 if (old_fw_img &&
4083 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4084 (img_optype == OPTYPE_UFI_DIR &&
4085 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4086 continue;
4087 } else if (status) {
4088 dev_err(dev, "Flashing section type 0x%x failed\n",
4089 img_type);
4090 return -EFAULT;
773a2d7c
PR
4091 }
4092 }
4093 return 0;
3f0d4560
AK
4094}
4095
485bf569 4096static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4097 const struct firmware *fw)
84517482 4098{
485bf569
SN
4099#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4100#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4101 struct device *dev = &adapter->pdev->dev;
84517482 4102 struct be_dma_mem flash_cmd;
485bf569
SN
4103 const u8 *data_ptr = NULL;
4104 u8 *dest_image_ptr = NULL;
4105 size_t image_size = 0;
4106 u32 chunk_size = 0;
4107 u32 data_written = 0;
4108 u32 offset = 0;
4109 int status = 0;
4110 u8 add_status = 0;
f67ef7ba 4111 u8 change_status;
84517482 4112
485bf569 4113 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4114 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4115 return -EINVAL;
d9efd2af
SB
4116 }
4117
485bf569
SN
4118 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4119 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4120 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4121 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4122 if (!flash_cmd.va)
4123 return -ENOMEM;
84517482 4124
485bf569
SN
4125 dest_image_ptr = flash_cmd.va +
4126 sizeof(struct lancer_cmd_req_write_object);
4127 image_size = fw->size;
4128 data_ptr = fw->data;
4129
4130 while (image_size) {
4131 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4132
4133 /* Copy the image chunk content. */
4134 memcpy(dest_image_ptr, data_ptr, chunk_size);
4135
4136 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4137 chunk_size, offset,
4138 LANCER_FW_DOWNLOAD_LOCATION,
4139 &data_written, &change_status,
4140 &add_status);
485bf569
SN
4141 if (status)
4142 break;
4143
4144 offset += data_written;
4145 data_ptr += data_written;
4146 image_size -= data_written;
4147 }
4148
4149 if (!status) {
4150 /* Commit the FW written */
4151 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4152 0, offset,
4153 LANCER_FW_DOWNLOAD_LOCATION,
4154 &data_written, &change_status,
4155 &add_status);
485bf569
SN
4156 }
4157
bb864e07 4158 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4159 if (status) {
bb864e07 4160 dev_err(dev, "Firmware load error\n");
3fb8cb80 4161 return be_cmd_status(status);
485bf569
SN
4162 }
4163
bb864e07
KA
4164 dev_info(dev, "Firmware flashed successfully\n");
4165
f67ef7ba 4166 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4167 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4168 status = lancer_physdev_ctrl(adapter,
4169 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4170 if (status) {
bb864e07
KA
4171 dev_err(dev, "Adapter busy, could not reset FW\n");
4172 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4173 }
4174 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4175 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4176 }
3fb8cb80
KA
4177
4178 return 0;
485bf569
SN
4179}
4180
ca34fe38
SP
4181#define UFI_TYPE2 2
4182#define UFI_TYPE3 3
0ad3157e 4183#define UFI_TYPE3R 10
ca34fe38
SP
4184#define UFI_TYPE4 4
4185static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4186 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4187{
ddf1169f 4188 if (!fhdr)
773a2d7c
PR
4189 goto be_get_ufi_exit;
4190
ca34fe38
SP
4191 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4192 return UFI_TYPE4;
0ad3157e
VV
4193 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4194 if (fhdr->asic_type_rev == 0x10)
4195 return UFI_TYPE3R;
4196 else
4197 return UFI_TYPE3;
4198 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4199 return UFI_TYPE2;
773a2d7c
PR
4200
4201be_get_ufi_exit:
4202 dev_err(&adapter->pdev->dev,
4203 "UFI and Interface are not compatible for flashing\n");
4204 return -1;
4205}
4206
485bf569
SN
4207static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4208{
485bf569
SN
4209 struct flash_file_hdr_g3 *fhdr3;
4210 struct image_hdr *img_hdr_ptr = NULL;
4211 struct be_dma_mem flash_cmd;
4212 const u8 *p;
773a2d7c 4213 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4214
be716446 4215 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
4216 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4217 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
4218 if (!flash_cmd.va) {
4219 status = -ENOMEM;
485bf569 4220 goto be_fw_exit;
84517482
AK
4221 }
4222
773a2d7c 4223 p = fw->data;
0ad3157e 4224 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4225
0ad3157e 4226 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4227
773a2d7c
PR
4228 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4229 for (i = 0; i < num_imgs; i++) {
4230 img_hdr_ptr = (struct image_hdr *)(fw->data +
4231 (sizeof(struct flash_file_hdr_g3) +
4232 i * sizeof(struct image_hdr)));
4233 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
4234 switch (ufi_type) {
4235 case UFI_TYPE4:
773a2d7c 4236 status = be_flash_skyhawk(adapter, fw,
748b539a 4237 &flash_cmd, num_imgs);
0ad3157e
VV
4238 break;
4239 case UFI_TYPE3R:
ca34fe38
SP
4240 status = be_flash_BEx(adapter, fw, &flash_cmd,
4241 num_imgs);
0ad3157e
VV
4242 break;
4243 case UFI_TYPE3:
4244 /* Do not flash this ufi on BE3-R cards */
4245 if (adapter->asic_rev < 0x10)
4246 status = be_flash_BEx(adapter, fw,
4247 &flash_cmd,
4248 num_imgs);
4249 else {
56ace3a0 4250 status = -EINVAL;
0ad3157e
VV
4251 dev_err(&adapter->pdev->dev,
4252 "Can't load BE3 UFI on BE3R\n");
4253 }
4254 }
3f0d4560 4255 }
773a2d7c
PR
4256 }
4257
ca34fe38
SP
4258 if (ufi_type == UFI_TYPE2)
4259 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4260 else if (ufi_type == -1)
56ace3a0 4261 status = -EINVAL;
84517482 4262
2b7bcebf
IV
4263 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4264 flash_cmd.dma);
84517482
AK
4265 if (status) {
4266 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4267 goto be_fw_exit;
84517482
AK
4268 }
4269
af901ca1 4270 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4271
485bf569
SN
4272be_fw_exit:
4273 return status;
4274}
4275
4276int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4277{
4278 const struct firmware *fw;
4279 int status;
4280
4281 if (!netif_running(adapter->netdev)) {
4282 dev_err(&adapter->pdev->dev,
4283 "Firmware load not allowed (interface is down)\n");
940a3fcd 4284 return -ENETDOWN;
485bf569
SN
4285 }
4286
4287 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4288 if (status)
4289 goto fw_exit;
4290
4291 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4292
4293 if (lancer_chip(adapter))
4294 status = lancer_fw_download(adapter, fw);
4295 else
4296 status = be_fw_download(adapter, fw);
4297
eeb65ced 4298 if (!status)
e97e3cda 4299 be_cmd_get_fw_ver(adapter);
eeb65ced 4300
84517482
AK
4301fw_exit:
4302 release_firmware(fw);
4303 return status;
4304}
4305
748b539a 4306static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4307{
4308 struct be_adapter *adapter = netdev_priv(dev);
4309 struct nlattr *attr, *br_spec;
4310 int rem;
4311 int status = 0;
4312 u16 mode = 0;
4313
4314 if (!sriov_enabled(adapter))
4315 return -EOPNOTSUPP;
4316
4317 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4318 if (!br_spec)
4319 return -EINVAL;
a77dcb8c
AK
4320
4321 nla_for_each_nested(attr, br_spec, rem) {
4322 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4323 continue;
4324
b7c1a314
TG
4325 if (nla_len(attr) < sizeof(mode))
4326 return -EINVAL;
4327
a77dcb8c
AK
4328 mode = nla_get_u16(attr);
4329 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4330 return -EINVAL;
4331
4332 status = be_cmd_set_hsw_config(adapter, 0, 0,
4333 adapter->if_handle,
4334 mode == BRIDGE_MODE_VEPA ?
4335 PORT_FWD_TYPE_VEPA :
4336 PORT_FWD_TYPE_VEB);
4337 if (status)
4338 goto err;
4339
4340 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4341 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4342
4343 return status;
4344 }
4345err:
4346 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4347 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4348
4349 return status;
4350}
4351
4352static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4353 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4354{
4355 struct be_adapter *adapter = netdev_priv(dev);
4356 int status = 0;
4357 u8 hsw_mode;
4358
4359 if (!sriov_enabled(adapter))
4360 return 0;
4361
4362 /* BE and Lancer chips support VEB mode only */
4363 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4364 hsw_mode = PORT_FWD_TYPE_VEB;
4365 } else {
4366 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4367 adapter->if_handle, &hsw_mode);
4368 if (status)
4369 return 0;
4370 }
4371
4372 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4373 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c
SF
4374 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4375 0, 0);
a77dcb8c
AK
4376}
4377
c5abe7c0 4378#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4379/* VxLAN offload Notes:
4380 *
4381 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4382 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4383 * is expected to work across all types of IP tunnels once exported. Skyhawk
4384 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4385 * offloads in hw_enc_features only when a VxLAN port is added. Note this only
4386 * ensures that other tunnels work fine while VxLAN offloads are not enabled.
4387 *
4388 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4389 * adds more than one port, disable offloads and don't re-enable them again
4390 * until after all the tunnels are removed.
4391 */
c9c47142
SP
4392static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4393 __be16 port)
4394{
4395 struct be_adapter *adapter = netdev_priv(netdev);
4396 struct device *dev = &adapter->pdev->dev;
4397 int status;
4398
4399 if (lancer_chip(adapter) || BEx_chip(adapter))
4400 return;
4401
4402 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4403 dev_info(dev,
4404 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4405 dev_info(dev, "Disabling VxLAN offloads\n");
4406 adapter->vxlan_port_count++;
4407 goto err;
c9c47142
SP
4408 }
4409
630f4b70
SB
4410 if (adapter->vxlan_port_count++ >= 1)
4411 return;
4412
c9c47142
SP
4413 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4414 OP_CONVERT_NORMAL_TO_TUNNEL);
4415 if (status) {
4416 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4417 goto err;
4418 }
4419
4420 status = be_cmd_set_vxlan_port(adapter, port);
4421 if (status) {
4422 dev_warn(dev, "Failed to add VxLAN port\n");
4423 goto err;
4424 }
4425 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4426 adapter->vxlan_port = port;
4427
630f4b70
SB
4428 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4429 NETIF_F_TSO | NETIF_F_TSO6 |
4430 NETIF_F_GSO_UDP_TUNNEL;
4431 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4432
c9c47142
SP
4433 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4434 be16_to_cpu(port));
4435 return;
4436err:
4437 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4438}
4439
4440static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4441 __be16 port)
4442{
4443 struct be_adapter *adapter = netdev_priv(netdev);
4444
4445 if (lancer_chip(adapter) || BEx_chip(adapter))
4446 return;
4447
4448 if (adapter->vxlan_port != port)
630f4b70 4449 goto done;
c9c47142
SP
4450
4451 be_disable_vxlan_offloads(adapter);
4452
4453 dev_info(&adapter->pdev->dev,
4454 "Disabled VxLAN offloads for UDP port %d\n",
4455 be16_to_cpu(port));
630f4b70
SB
4456done:
4457 adapter->vxlan_port_count--;
c9c47142 4458}
725d548f
JS
4459
4460static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
4461{
4462 return vxlan_gso_check(skb);
4463}
c5abe7c0 4464#endif
c9c47142 4465
e5686ad8 4466static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4467 .ndo_open = be_open,
4468 .ndo_stop = be_close,
4469 .ndo_start_xmit = be_xmit,
a54769f5 4470 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4471 .ndo_set_mac_address = be_mac_addr_set,
4472 .ndo_change_mtu = be_change_mtu,
ab1594e9 4473 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4474 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4475 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4476 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4477 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4478 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4479 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4480 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4481 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4482#ifdef CONFIG_NET_POLL_CONTROLLER
4483 .ndo_poll_controller = be_netpoll,
4484#endif
a77dcb8c
AK
4485 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4486 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4487#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4488 .ndo_busy_poll = be_busy_poll,
6384a4d0 4489#endif
c5abe7c0 4490#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4491 .ndo_add_vxlan_port = be_add_vxlan_port,
4492 .ndo_del_vxlan_port = be_del_vxlan_port,
725d548f 4493 .ndo_gso_check = be_gso_check,
c5abe7c0 4494#endif
6b7c5b94
SP
4495};
4496
4497static void be_netdev_init(struct net_device *netdev)
4498{
4499 struct be_adapter *adapter = netdev_priv(netdev);
4500
6332c8d3 4501 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4502 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4503 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4504 if (be_multi_rxq(adapter))
4505 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4506
4507 netdev->features |= netdev->hw_features |
f646968f 4508 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4509
eb8a50d9 4510 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4511 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4512
fbc13f01
AK
4513 netdev->priv_flags |= IFF_UNICAST_FLT;
4514
6b7c5b94
SP
4515 netdev->flags |= IFF_MULTICAST;
4516
b7e5887e 4517 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4518
10ef9ab4 4519 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4520
7ad24ea4 4521 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4522}
4523
4524static void be_unmap_pci_bars(struct be_adapter *adapter)
4525{
c5b3ad4c
SP
4526 if (adapter->csr)
4527 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4528 if (adapter->db)
ce66f781 4529 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4530}
4531
ce66f781
SP
4532static int db_bar(struct be_adapter *adapter)
4533{
4534 if (lancer_chip(adapter) || !be_physfn(adapter))
4535 return 0;
4536 else
4537 return 4;
4538}
4539
4540static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4541{
dbf0f2a7 4542 if (skyhawk_chip(adapter)) {
ce66f781
SP
4543 adapter->roce_db.size = 4096;
4544 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4545 db_bar(adapter));
4546 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4547 db_bar(adapter));
4548 }
045508a8 4549 return 0;
6b7c5b94
SP
4550}
4551
4552static int be_map_pci_bars(struct be_adapter *adapter)
4553{
4554 u8 __iomem *addr;
fe6d2a38 4555
c5b3ad4c
SP
4556 if (BEx_chip(adapter) && be_physfn(adapter)) {
4557 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4558 if (!adapter->csr)
c5b3ad4c
SP
4559 return -ENOMEM;
4560 }
4561
ce66f781 4562 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4563 if (!addr)
6b7c5b94 4564 goto pci_map_err;
ba343c77 4565 adapter->db = addr;
ce66f781
SP
4566
4567 be_roce_map_pci_bars(adapter);
6b7c5b94 4568 return 0;
ce66f781 4569
6b7c5b94 4570pci_map_err:
acbafeb1 4571 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
4572 be_unmap_pci_bars(adapter);
4573 return -ENOMEM;
4574}
4575
6b7c5b94
SP
4576static void be_ctrl_cleanup(struct be_adapter *adapter)
4577{
8788fdc2 4578 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4579
4580 be_unmap_pci_bars(adapter);
4581
4582 if (mem->va)
2b7bcebf
IV
4583 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4584 mem->dma);
e7b909a6 4585
5b8821b7 4586 mem = &adapter->rx_filter;
e7b909a6 4587 if (mem->va)
2b7bcebf
IV
4588 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4589 mem->dma);
6b7c5b94
SP
4590}
4591
6b7c5b94
SP
4592static int be_ctrl_init(struct be_adapter *adapter)
4593{
8788fdc2
SP
4594 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4595 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4596 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4597 u32 sli_intf;
6b7c5b94 4598 int status;
6b7c5b94 4599
ce66f781
SP
4600 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4601 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4602 SLI_INTF_FAMILY_SHIFT;
4603 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4604
6b7c5b94
SP
4605 status = be_map_pci_bars(adapter);
4606 if (status)
e7b909a6 4607 goto done;
6b7c5b94
SP
4608
4609 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4610 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4611 mbox_mem_alloc->size,
4612 &mbox_mem_alloc->dma,
4613 GFP_KERNEL);
6b7c5b94 4614 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4615 status = -ENOMEM;
4616 goto unmap_pci_bars;
6b7c5b94
SP
4617 }
4618 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4619 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4620 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4621 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4622
5b8821b7 4623 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4624 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4625 rx_filter->size, &rx_filter->dma,
4626 GFP_KERNEL);
ddf1169f 4627 if (!rx_filter->va) {
e7b909a6
SP
4628 status = -ENOMEM;
4629 goto free_mbox;
4630 }
1f9061d2 4631
2984961c 4632 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4633 spin_lock_init(&adapter->mcc_lock);
4634 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4635
5eeff635 4636 init_completion(&adapter->et_cmd_compl);
cf588477 4637 pci_save_state(adapter->pdev);
6b7c5b94 4638 return 0;
e7b909a6
SP
4639
4640free_mbox:
2b7bcebf
IV
4641 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4642 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4643
4644unmap_pci_bars:
4645 be_unmap_pci_bars(adapter);
4646
4647done:
4648 return status;
6b7c5b94
SP
4649}
4650
4651static void be_stats_cleanup(struct be_adapter *adapter)
4652{
3abcdeda 4653 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4654
4655 if (cmd->va)
2b7bcebf
IV
4656 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4657 cmd->va, cmd->dma);
6b7c5b94
SP
4658}
4659
4660static int be_stats_init(struct be_adapter *adapter)
4661{
3abcdeda 4662 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4663
ca34fe38
SP
4664 if (lancer_chip(adapter))
4665 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4666 else if (BE2_chip(adapter))
89a88ab8 4667 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4668 else if (BE3_chip(adapter))
ca34fe38 4669 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4670 else
4671 /* ALL non-BE ASICs */
4672 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4673
ede23fa8
JP
4674 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4675 GFP_KERNEL);
ddf1169f 4676 if (!cmd->va)
6b568689 4677 return -ENOMEM;
6b7c5b94
SP
4678 return 0;
4679}
4680
3bc6b06c 4681static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4682{
4683 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4684
6b7c5b94
SP
4685 if (!adapter)
4686 return;
4687
045508a8 4688 be_roce_dev_remove(adapter);
8cef7a78 4689 be_intr_set(adapter, false);
045508a8 4690
f67ef7ba
PR
4691 cancel_delayed_work_sync(&adapter->func_recovery_work);
4692
6b7c5b94
SP
4693 unregister_netdev(adapter->netdev);
4694
5fb379ee
SP
4695 be_clear(adapter);
4696
bf99e50d
PR
4697 /* tell fw we're done with firing cmds */
4698 be_cmd_fw_clean(adapter);
4699
6b7c5b94
SP
4700 be_stats_cleanup(adapter);
4701
4702 be_ctrl_cleanup(adapter);
4703
d6b6d987
SP
4704 pci_disable_pcie_error_reporting(pdev);
4705
6b7c5b94
SP
4706 pci_release_regions(pdev);
4707 pci_disable_device(pdev);
4708
4709 free_netdev(adapter->netdev);
4710}
4711
39f1d94d 4712static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4713{
baaa08d1 4714 int status, level;
6b7c5b94 4715
9e1453c5
AK
4716 status = be_cmd_get_cntl_attributes(adapter);
4717 if (status)
4718 return status;
4719
7aeb2156
PR
4720 /* Must be a power of 2 or else MODULO will BUG_ON */
4721 adapter->be_get_temp_freq = 64;
4722
baaa08d1
VV
4723 if (BEx_chip(adapter)) {
4724 level = be_cmd_get_fw_log_level(adapter);
4725 adapter->msg_enable =
4726 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4727 }
941a77d5 4728
92bf14ab 4729 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4730 return 0;
6b7c5b94
SP
4731}
4732
f67ef7ba 4733static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4734{
01e5b2c4 4735 struct device *dev = &adapter->pdev->dev;
d8110f62 4736 int status;
d8110f62 4737
f67ef7ba
PR
4738 status = lancer_test_and_set_rdy_state(adapter);
4739 if (status)
4740 goto err;
d8110f62 4741
f67ef7ba
PR
4742 if (netif_running(adapter->netdev))
4743 be_close(adapter->netdev);
d8110f62 4744
f67ef7ba
PR
4745 be_clear(adapter);
4746
01e5b2c4 4747 be_clear_all_error(adapter);
f67ef7ba
PR
4748
4749 status = be_setup(adapter);
4750 if (status)
4751 goto err;
d8110f62 4752
f67ef7ba
PR
4753 if (netif_running(adapter->netdev)) {
4754 status = be_open(adapter->netdev);
d8110f62
PR
4755 if (status)
4756 goto err;
f67ef7ba 4757 }
d8110f62 4758
4bebb56a 4759 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4760 return 0;
4761err:
01e5b2c4
SK
4762 if (status == -EAGAIN)
4763 dev_err(dev, "Waiting for resource provisioning\n");
4764 else
4bebb56a 4765 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4766
f67ef7ba
PR
4767 return status;
4768}
4769
4770static void be_func_recovery_task(struct work_struct *work)
4771{
4772 struct be_adapter *adapter =
4773 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4774 int status = 0;
d8110f62 4775
f67ef7ba 4776 be_detect_error(adapter);
d8110f62 4777
f67ef7ba 4778 if (adapter->hw_error && lancer_chip(adapter)) {
f67ef7ba
PR
4779 rtnl_lock();
4780 netif_device_detach(adapter->netdev);
4781 rtnl_unlock();
d8110f62 4782
f67ef7ba 4783 status = lancer_recover_func(adapter);
f67ef7ba
PR
4784 if (!status)
4785 netif_device_attach(adapter->netdev);
d8110f62 4786 }
f67ef7ba 4787
01e5b2c4
SK
4788 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4789 * no need to attempt further recovery.
4790 */
4791 if (!status || status == -EAGAIN)
4792 schedule_delayed_work(&adapter->func_recovery_work,
4793 msecs_to_jiffies(1000));
d8110f62
PR
4794}
4795
4796static void be_worker(struct work_struct *work)
4797{
4798 struct be_adapter *adapter =
4799 container_of(work, struct be_adapter, work.work);
4800 struct be_rx_obj *rxo;
4801 int i;
4802
d8110f62
PR
4803 /* when interrupts are not yet enabled, just reap any pending
4804 * mcc completions */
4805 if (!netif_running(adapter->netdev)) {
072a9c48 4806 local_bh_disable();
10ef9ab4 4807 be_process_mcc(adapter);
072a9c48 4808 local_bh_enable();
d8110f62
PR
4809 goto reschedule;
4810 }
4811
4812 if (!adapter->stats_cmd_sent) {
4813 if (lancer_chip(adapter))
4814 lancer_cmd_get_pport_stats(adapter,
cd3307aa 4815 &adapter->stats_cmd);
d8110f62
PR
4816 else
4817 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4818 }
4819
d696b5e2
VV
4820 if (be_physfn(adapter) &&
4821 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4822 be_cmd_get_die_temperature(adapter);
4823
d8110f62 4824 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4825 /* Replenish RX-queues starved due to memory
4826 * allocation failures.
4827 */
4828 if (rxo->rx_post_starved)
c30d7266 4829 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
d8110f62
PR
4830 }
4831
2632bafd 4832 be_eqd_update(adapter);
10ef9ab4 4833
d8110f62
PR
4834reschedule:
4835 adapter->work_counter++;
4836 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4837}
4838
257a3feb 4839/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4840static bool be_reset_required(struct be_adapter *adapter)
4841{
257a3feb 4842 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4843}
4844
d379142b
SP
4845static char *mc_name(struct be_adapter *adapter)
4846{
f93f160b
VV
4847 char *str = ""; /* default */
4848
4849 switch (adapter->mc_type) {
4850 case UMC:
4851 str = "UMC";
4852 break;
4853 case FLEX10:
4854 str = "FLEX10";
4855 break;
4856 case vNIC1:
4857 str = "vNIC-1";
4858 break;
4859 case nPAR:
4860 str = "nPAR";
4861 break;
4862 case UFP:
4863 str = "UFP";
4864 break;
4865 case vNIC2:
4866 str = "vNIC-2";
4867 break;
4868 default:
4869 str = "";
4870 }
4871
4872 return str;
d379142b
SP
4873}
4874
4875static inline char *func_name(struct be_adapter *adapter)
4876{
4877 return be_physfn(adapter) ? "PF" : "VF";
4878}
4879
1dd06ae8 4880static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4881{
4882 int status = 0;
4883 struct be_adapter *adapter;
4884 struct net_device *netdev;
b4e32a71 4885 char port_name;
6b7c5b94 4886
acbafeb1
SP
4887 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4888
6b7c5b94
SP
4889 status = pci_enable_device(pdev);
4890 if (status)
4891 goto do_none;
4892
4893 status = pci_request_regions(pdev, DRV_NAME);
4894 if (status)
4895 goto disable_dev;
4896 pci_set_master(pdev);
4897
7f640062 4898 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4899 if (!netdev) {
6b7c5b94
SP
4900 status = -ENOMEM;
4901 goto rel_reg;
4902 }
4903 adapter = netdev_priv(netdev);
4904 adapter->pdev = pdev;
4905 pci_set_drvdata(pdev, adapter);
4906 adapter->netdev = netdev;
2243e2e9 4907 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4908
4c15c243 4909 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4910 if (!status) {
4911 netdev->features |= NETIF_F_HIGHDMA;
4912 } else {
4c15c243 4913 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4914 if (status) {
4915 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4916 goto free_netdev;
4917 }
4918 }
4919
2f951a9a
KA
4920 status = pci_enable_pcie_error_reporting(pdev);
4921 if (!status)
4922 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 4923
6b7c5b94
SP
4924 status = be_ctrl_init(adapter);
4925 if (status)
39f1d94d 4926 goto free_netdev;
6b7c5b94 4927
2243e2e9 4928 /* sync up with fw's ready state */
ba343c77 4929 if (be_physfn(adapter)) {
bf99e50d 4930 status = be_fw_wait_ready(adapter);
ba343c77
SB
4931 if (status)
4932 goto ctrl_clean;
ba343c77 4933 }
6b7c5b94 4934
39f1d94d
SP
4935 if (be_reset_required(adapter)) {
4936 status = be_cmd_reset_function(adapter);
4937 if (status)
4938 goto ctrl_clean;
556ae191 4939
2d177be8
KA
4940 /* Wait for interrupts to quiesce after an FLR */
4941 msleep(100);
4942 }
8cef7a78
SK
4943
4944 /* Allow interrupts for other ULPs running on NIC function */
4945 be_intr_set(adapter, true);
10ef9ab4 4946
2d177be8
KA
4947 /* tell fw we're ready to fire cmds */
4948 status = be_cmd_fw_init(adapter);
4949 if (status)
4950 goto ctrl_clean;
4951
2243e2e9
SP
4952 status = be_stats_init(adapter);
4953 if (status)
4954 goto ctrl_clean;
4955
39f1d94d 4956 status = be_get_initial_config(adapter);
6b7c5b94
SP
4957 if (status)
4958 goto stats_clean;
6b7c5b94
SP
4959
4960 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4961 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
5f820b6c
KA
4962 adapter->rx_fc = true;
4963 adapter->tx_fc = true;
6b7c5b94 4964
5fb379ee
SP
4965 status = be_setup(adapter);
4966 if (status)
55f5c3c5 4967 goto stats_clean;
2243e2e9 4968
3abcdeda 4969 be_netdev_init(netdev);
6b7c5b94
SP
4970 status = register_netdev(netdev);
4971 if (status != 0)
5fb379ee 4972 goto unsetup;
6b7c5b94 4973
045508a8
PP
4974 be_roce_dev_add(adapter);
4975
f67ef7ba
PR
4976 schedule_delayed_work(&adapter->func_recovery_work,
4977 msecs_to_jiffies(1000));
b4e32a71
PR
4978
4979 be_cmd_query_port_name(adapter, &port_name);
4980
d379142b
SP
4981 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4982 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4983
6b7c5b94
SP
4984 return 0;
4985
5fb379ee
SP
4986unsetup:
4987 be_clear(adapter);
6b7c5b94
SP
4988stats_clean:
4989 be_stats_cleanup(adapter);
4990ctrl_clean:
4991 be_ctrl_cleanup(adapter);
f9449ab7 4992free_netdev:
fe6d2a38 4993 free_netdev(netdev);
6b7c5b94
SP
4994rel_reg:
4995 pci_release_regions(pdev);
4996disable_dev:
4997 pci_disable_device(pdev);
4998do_none:
c4ca2374 4999 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
5000 return status;
5001}
5002
5003static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5004{
5005 struct be_adapter *adapter = pci_get_drvdata(pdev);
5006 struct net_device *netdev = adapter->netdev;
5007
76a9e08e 5008 if (adapter->wol_en)
71d8d1b5
AK
5009 be_setup_wol(adapter, true);
5010
d4360d6f 5011 be_intr_set(adapter, false);
f67ef7ba
PR
5012 cancel_delayed_work_sync(&adapter->func_recovery_work);
5013
6b7c5b94
SP
5014 netif_device_detach(netdev);
5015 if (netif_running(netdev)) {
5016 rtnl_lock();
5017 be_close(netdev);
5018 rtnl_unlock();
5019 }
9b0365f1 5020 be_clear(adapter);
6b7c5b94
SP
5021
5022 pci_save_state(pdev);
5023 pci_disable_device(pdev);
5024 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5025 return 0;
5026}
5027
5028static int be_resume(struct pci_dev *pdev)
5029{
5030 int status = 0;
5031 struct be_adapter *adapter = pci_get_drvdata(pdev);
5032 struct net_device *netdev = adapter->netdev;
5033
5034 netif_device_detach(netdev);
5035
5036 status = pci_enable_device(pdev);
5037 if (status)
5038 return status;
5039
1ca01512 5040 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
5041 pci_restore_state(pdev);
5042
dd5746bf
SB
5043 status = be_fw_wait_ready(adapter);
5044 if (status)
5045 return status;
5046
d4360d6f 5047 be_intr_set(adapter, true);
2243e2e9
SP
5048 /* tell fw we're ready to fire cmds */
5049 status = be_cmd_fw_init(adapter);
5050 if (status)
5051 return status;
5052
9b0365f1 5053 be_setup(adapter);
6b7c5b94
SP
5054 if (netif_running(netdev)) {
5055 rtnl_lock();
5056 be_open(netdev);
5057 rtnl_unlock();
5058 }
f67ef7ba
PR
5059
5060 schedule_delayed_work(&adapter->func_recovery_work,
5061 msecs_to_jiffies(1000));
6b7c5b94 5062 netif_device_attach(netdev);
71d8d1b5 5063
76a9e08e 5064 if (adapter->wol_en)
71d8d1b5 5065 be_setup_wol(adapter, false);
a4ca055f 5066
6b7c5b94
SP
5067 return 0;
5068}
5069
82456b03
SP
5070/*
5071 * An FLR will stop BE from DMAing any data.
5072 */
5073static void be_shutdown(struct pci_dev *pdev)
5074{
5075 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5076
2d5d4154
AK
5077 if (!adapter)
5078 return;
82456b03 5079
d114f99a 5080 be_roce_dev_shutdown(adapter);
0f4a6828 5081 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5082 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5083
2d5d4154 5084 netif_device_detach(adapter->netdev);
82456b03 5085
57841869
AK
5086 be_cmd_reset_function(adapter);
5087
82456b03 5088 pci_disable_device(pdev);
82456b03
SP
5089}
5090
cf588477 5091static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5092 pci_channel_state_t state)
cf588477
SP
5093{
5094 struct be_adapter *adapter = pci_get_drvdata(pdev);
5095 struct net_device *netdev = adapter->netdev;
5096
5097 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5098
01e5b2c4
SK
5099 if (!adapter->eeh_error) {
5100 adapter->eeh_error = true;
cf588477 5101
01e5b2c4 5102 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5103
cf588477 5104 rtnl_lock();
01e5b2c4
SK
5105 netif_device_detach(netdev);
5106 if (netif_running(netdev))
5107 be_close(netdev);
cf588477 5108 rtnl_unlock();
01e5b2c4
SK
5109
5110 be_clear(adapter);
cf588477 5111 }
cf588477
SP
5112
5113 if (state == pci_channel_io_perm_failure)
5114 return PCI_ERS_RESULT_DISCONNECT;
5115
5116 pci_disable_device(pdev);
5117
eeb7fc7b
SK
5118 /* The error could cause the FW to trigger a flash debug dump.
5119 * Resetting the card while flash dump is in progress
c8a54163
PR
5120 * can cause it not to recover; wait for it to finish.
5121 * Wait only for first function as it is needed only once per
5122 * adapter.
eeb7fc7b 5123 */
c8a54163
PR
5124 if (pdev->devfn == 0)
5125 ssleep(30);
5126
cf588477
SP
5127 return PCI_ERS_RESULT_NEED_RESET;
5128}
5129
5130static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5131{
5132 struct be_adapter *adapter = pci_get_drvdata(pdev);
5133 int status;
5134
5135 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5136
5137 status = pci_enable_device(pdev);
5138 if (status)
5139 return PCI_ERS_RESULT_DISCONNECT;
5140
5141 pci_set_master(pdev);
1ca01512 5142 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5143 pci_restore_state(pdev);
5144
5145 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5146 dev_info(&adapter->pdev->dev,
5147 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5148 status = be_fw_wait_ready(adapter);
cf588477
SP
5149 if (status)
5150 return PCI_ERS_RESULT_DISCONNECT;
5151
d6b6d987 5152 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5153 be_clear_all_error(adapter);
cf588477
SP
5154 return PCI_ERS_RESULT_RECOVERED;
5155}
5156
5157static void be_eeh_resume(struct pci_dev *pdev)
5158{
5159 int status = 0;
5160 struct be_adapter *adapter = pci_get_drvdata(pdev);
5161 struct net_device *netdev = adapter->netdev;
5162
5163 dev_info(&adapter->pdev->dev, "EEH resume\n");
5164
5165 pci_save_state(pdev);
5166
2d177be8 5167 status = be_cmd_reset_function(adapter);
cf588477
SP
5168 if (status)
5169 goto err;
5170
03a58baa
KA
5171 /* On some BE3 FW versions, after a HW reset,
5172 * interrupts will remain disabled for each function.
5173 * So, explicitly enable interrupts
5174 */
5175 be_intr_set(adapter, true);
5176
2d177be8
KA
5177 /* tell fw we're ready to fire cmds */
5178 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5179 if (status)
5180 goto err;
5181
cf588477
SP
5182 status = be_setup(adapter);
5183 if (status)
5184 goto err;
5185
5186 if (netif_running(netdev)) {
5187 status = be_open(netdev);
5188 if (status)
5189 goto err;
5190 }
f67ef7ba
PR
5191
5192 schedule_delayed_work(&adapter->func_recovery_work,
5193 msecs_to_jiffies(1000));
cf588477
SP
5194 netif_device_attach(netdev);
5195 return;
5196err:
5197 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5198}
5199
3646f0e5 5200static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5201 .error_detected = be_eeh_err_detected,
5202 .slot_reset = be_eeh_reset,
5203 .resume = be_eeh_resume,
5204};
5205
6b7c5b94
SP
5206static struct pci_driver be_driver = {
5207 .name = DRV_NAME,
5208 .id_table = be_dev_ids,
5209 .probe = be_probe,
5210 .remove = be_remove,
5211 .suspend = be_suspend,
cf588477 5212 .resume = be_resume,
82456b03 5213 .shutdown = be_shutdown,
cf588477 5214 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5215};
5216
5217static int __init be_init_module(void)
5218{
8e95a202
JP
5219 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5220 rx_frag_size != 2048) {
6b7c5b94
SP
5221 printk(KERN_WARNING DRV_NAME
5222 " : Module param rx_frag_size must be 2048/4096/8192."
5223 " Using 2048\n");
5224 rx_frag_size = 2048;
5225 }
6b7c5b94
SP
5226
5227 return pci_register_driver(&be_driver);
5228}
5229module_init(be_init_module);
5230
5231static void __exit be_exit_module(void)
5232{
5233 pci_unregister_driver(&be_driver);
5234}
5235module_exit(be_exit_module);