/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

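/*
 * Note on the doorbell encoding used by the be_*_notify helpers: each
 * 32-bit doorbell write packs the ring id into the low bits and the
 * post/pop count (plus arm/clear flags for EQs and CQs) into higher
 * bit-fields.  For example, posting 4 RX buffers to RQ id 5 writes
 * (5 & DB_RQ_RING_ID_MASK) | (4 << DB_RQ_NUM_POSTED_SHIFT).  The wmb()
 * before each write orders the descriptor updates ahead of the doorbell.
 */
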
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

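/*
 * Worked example: if *acc == 0x0001fff0 and the 16-bit HW counter now
 * reads val == 0x0005, then val < lo(*acc), so the counter must have
 * wrapped past 0xffff.  newacc = 0x00010000 + 0x5 + 65536 = 0x00020005,
 * which correctly accounts for the 0x15 events that occurred across the
 * wrap (0x10000 - 0xfff0 + 0x5).
 */
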
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
			   drvs->rx_alignment_symbol_errors +
			   drvs->rx_in_range_errors +
			   drvs->rx_out_range_errors +
			   drvs->rx_frame_too_long +
			   drvs->rx_dropped_too_small +
			   drvs->rx_dropped_too_short +
			   drvs->rx_dropped_header_too_small +
			   drvs->rx_dropped_tcp_length +
			   drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
				  drvs->rx_out_range_errors +
				  drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

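/*
 * The do/while loops above use the u64_stats seqcount pattern: the reader
 * snapshots the 64-bit counters and retries if the writer's sequence
 * count changed mid-read, e.g.:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&stats->sync);
 *		pkts = stats->tx_pkts;
 *	} while (u64_stats_fetch_retry_irq(&stats->sync, start));
 *
 * This gives tear-free 64-bit reads on 32-bit CPUs without taking a lock.
 */
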
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

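/*
 * Example: for a DMA address of 0x0000001234567890 and len 1500,
 * wrb_fill() stores frag_pa_hi = 0x00000012, frag_pa_lo = 0x34567890 and
 * frag_len = 1500 (masked to ETH_WRB_FRAG_LEN_MASK), matching the split
 * hi/lo address layout of the hardware WRB descriptor.
 */
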
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

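/*
 * Example: with vlan_tag 0xa064 (priority 5, VID 100), if bit 5 is not
 * set in adapter->vlan_prio_bmap the priority bits are replaced with
 * adapter->recommended_prio while the VID (100) is preserved.
 */
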
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

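/*
 * In QnQ mode the helper above can therefore emit a double-tagged frame:
 * the inner tag (from the skb or the port PVID) is inserted first, then
 * adapter->qnq_vid is inserted as the outer tag, and *skip_hw_vlan is set
 * so the hardware does not add yet another tag of its own.
 */
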
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill(queue_head_node(txq), 0, 0);
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

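/*
 * Example of the odd-WRB fixup above on non-Lancer chips: if a request
 * used 3 WRBs (header + 2 frags), pend_wrb_cnt is odd, so a zero-length
 * dummy WRB is appended and the num_wrb field in the last header WRB is
 * bumped from 3 to 4 before the doorbell rings, so the doorbell never
 * posts an odd number of WRBs on these chips.
 */
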
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

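/*
 * be_xmit() defers the TX doorbell while skb->xmit_more is set: WRBs for
 * back-to-back skbs accumulate in pend_wrb_cnt, and a single
 * be_xmit_flush() rings the doorbell for the whole batch on the last skb
 * of the train (or earlier if the subqueue fills), trading one MMIO write
 * for many.
 */
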
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

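/*
 * Example of the Skyhawk percentage check above: on a 10000 Mbps link,
 * percent_rate = 100, so max_tx_rate = 2500 passes (25% of line rate)
 * while 2550 fails with "TX-rate must be a multiple of 100 Mbps".
 */
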
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip if wrapped around or on the first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

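/*
 * Worked example of the adaptive interrupt coalescing math above: at a
 * combined 120,000 pkts/s, eqd = (120000 / 15000) << 2 = 32; at 30,000
 * pkts/s, eqd = (30000 / 15000) << 2 = 8; below roughly 30,000 pkts/s the
 * result drops under 8 and is forced to 0 (no delay).  The final value is
 * clamped to [min_eqd, max_eqd] before being converted to the firmware's
 * delay multiplier as (eqd * 65) / 100.
 */
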
3abcdeda 1530static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1531 struct be_rx_compl_info *rxcp)
4097f663 1532{
ac124ff9 1533 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1534
ab1594e9 1535 u64_stats_update_begin(&stats->sync);
3abcdeda 1536 stats->rx_compl++;
2e588f84 1537 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1538 stats->rx_pkts++;
2e588f84 1539 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1540 stats->rx_mcast_pkts++;
2e588f84 1541 if (rxcp->err)
ac124ff9 1542 stats->rx_compl_err++;
ab1594e9 1543 u64_stats_update_end(&stats->sync);
4097f663
SP
1544}
1545
2e588f84 1546static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1547{
19fad86f 1548 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1549 * Also ignore ipcksm for ipv6 pkts
1550 */
2e588f84 1551 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1552 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1553}
1554
0b0ef1d0 1555static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1556{
10ef9ab4 1557 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1558 struct be_rx_page_info *rx_page_info;
3abcdeda 1559 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1560 u16 frag_idx = rxq->tail;
6b7c5b94 1561
3abcdeda 1562 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1563 BUG_ON(!rx_page_info->page);
1564
e50287be 1565 if (rx_page_info->last_frag) {
2b7bcebf
IV
1566 dma_unmap_page(&adapter->pdev->dev,
1567 dma_unmap_addr(rx_page_info, bus),
1568 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1569 rx_page_info->last_frag = false;
1570 } else {
1571 dma_sync_single_for_cpu(&adapter->pdev->dev,
1572 dma_unmap_addr(rx_page_info, bus),
1573 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1574 }
6b7c5b94 1575
0b0ef1d0 1576 queue_tail_inc(rxq);
6b7c5b94
SP
1577 atomic_dec(&rxq->used);
1578 return rx_page_info;
1579}
1580
1581/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1582static void be_rx_compl_discard(struct be_rx_obj *rxo,
1583 struct be_rx_compl_info *rxcp)
6b7c5b94 1584{
6b7c5b94 1585 struct be_rx_page_info *page_info;
2e588f84 1586 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1587
e80d9da6 1588 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1589 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1590 put_page(page_info->page);
1591 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1592 }
1593}
1594
1595/*
1596 * skb_fill_rx_data forms a complete skb for an ether frame
1597 * indicated by rxcp.
1598 */
10ef9ab4
SP
1599static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1600 struct be_rx_compl_info *rxcp)
6b7c5b94 1601{
6b7c5b94 1602 struct be_rx_page_info *page_info;
2e588f84
SP
1603 u16 i, j;
1604 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1605 u8 *start;
6b7c5b94 1606
0b0ef1d0 1607 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1608 start = page_address(page_info->page) + page_info->page_offset;
1609 prefetch(start);
1610
1611 /* Copy data in the first descriptor of this completion */
2e588f84 1612 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1613
6b7c5b94
SP
1614 skb->len = curr_frag_len;
1615 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1616 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1617 /* Complete packet has now been moved to data */
1618 put_page(page_info->page);
1619 skb->data_len = 0;
1620 skb->tail += curr_frag_len;
1621 } else {
ac1ae5f3
ED
1622 hdr_len = ETH_HLEN;
1623 memcpy(skb->data, start, hdr_len);
6b7c5b94 1624 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1625 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1626 skb_shinfo(skb)->frags[0].page_offset =
1627 page_info->page_offset + hdr_len;
748b539a
SP
1628 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1629 curr_frag_len - hdr_len);
6b7c5b94 1630 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1631 skb->truesize += rx_frag_size;
6b7c5b94
SP
1632 skb->tail += hdr_len;
1633 }
205859a2 1634 page_info->page = NULL;
6b7c5b94 1635
2e588f84
SP
1636 if (rxcp->pkt_size <= rx_frag_size) {
1637 BUG_ON(rxcp->num_rcvd != 1);
1638 return;
6b7c5b94
SP
1639 }
1640
1641 /* More frags present for this completion */
2e588f84
SP
1642 remaining = rxcp->pkt_size - curr_frag_len;
1643 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1644 page_info = get_rx_page_info(rxo);
2e588f84 1645 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1646
bd46cb6c
AK
1647 /* Coalesce all frags from the same physical page in one slot */
1648 if (page_info->page_offset == 0) {
1649 /* Fresh page */
1650 j++;
b061b39e 1651 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1652 skb_shinfo(skb)->frags[j].page_offset =
1653 page_info->page_offset;
9e903e08 1654 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1655 skb_shinfo(skb)->nr_frags++;
1656 } else {
1657 put_page(page_info->page);
1658 }
1659
9e903e08 1660 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1661 skb->len += curr_frag_len;
1662 skb->data_len += curr_frag_len;
bdb28a97 1663 skb->truesize += rx_frag_size;
2e588f84 1664 remaining -= curr_frag_len;
205859a2 1665 page_info->page = NULL;
6b7c5b94 1666 }
bd46cb6c 1667 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1668}
1669
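/* A sketch of the layout skb_fill_rx_data() produces, assuming the
 * default rx_frag_size of 2048: for a 6000-byte frame num_rcvd is 3;
 * the 14-byte Ethernet header is copied into the skb's linear area,
 * the remaining 2034 bytes of the first HW frag become skb frag[0],
 * and the two later HW frags are appended, with consecutive frags
 * carved from the same big page coalesced into a single frag slot.
 */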
5be93b9a 1670/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1671static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1672 struct be_rx_compl_info *rxcp)
6b7c5b94 1673{
10ef9ab4 1674 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1675 struct net_device *netdev = adapter->netdev;
6b7c5b94 1676 struct sk_buff *skb;
89420424 1677
bb349bb4 1678 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1679 if (unlikely(!skb)) {
ac124ff9 1680 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1681 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1682 return;
1683 }
1684
10ef9ab4 1685 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1686
6332c8d3 1687 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1688 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1689 else
1690 skb_checksum_none_assert(skb);
6b7c5b94 1691
6332c8d3 1692 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1693 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1694 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1695 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1696
b6c0e89d 1697 skb->csum_level = rxcp->tunneled;
6384a4d0 1698 skb_mark_napi_id(skb, napi);
6b7c5b94 1699
343e43c0 1700 if (rxcp->vlanf)
86a9bad3 1701 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1702
1703 netif_receive_skb(skb);
6b7c5b94
SP
1704}
1705
5be93b9a 1706/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1707static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1708 struct napi_struct *napi,
1709 struct be_rx_compl_info *rxcp)
6b7c5b94 1710{
10ef9ab4 1711 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1712 struct be_rx_page_info *page_info;
5be93b9a 1713 struct sk_buff *skb = NULL;
2e588f84
SP
1714 u16 remaining, curr_frag_len;
1715 u16 i, j;
3968fa1e 1716
10ef9ab4 1717 skb = napi_get_frags(napi);
5be93b9a 1718 if (!skb) {
10ef9ab4 1719 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1720 return;
1721 }
1722
2e588f84
SP
1723 remaining = rxcp->pkt_size;
1724 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1725 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1726
1727 curr_frag_len = min(remaining, rx_frag_size);
1728
bd46cb6c
AK
1729 /* Coalesce all frags from the same physical page in one slot */
1730 if (i == 0 || page_info->page_offset == 0) {
1731 /* First frag or Fresh page */
1732 j++;
b061b39e 1733 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1734 skb_shinfo(skb)->frags[j].page_offset =
1735 page_info->page_offset;
9e903e08 1736 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1737 } else {
1738 put_page(page_info->page);
1739 }
9e903e08 1740 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1741 skb->truesize += rx_frag_size;
bd46cb6c 1742 remaining -= curr_frag_len;
6b7c5b94
SP
1743 memset(page_info, 0, sizeof(*page_info));
1744 }
bd46cb6c 1745 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1746
5be93b9a 1747 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1748 skb->len = rxcp->pkt_size;
1749 skb->data_len = rxcp->pkt_size;
5be93b9a 1750 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1751 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1752 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1753 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1754
b6c0e89d 1755 skb->csum_level = rxcp->tunneled;
6384a4d0 1756 skb_mark_napi_id(skb, napi);
5be93b9a 1757
343e43c0 1758 if (rxcp->vlanf)
86a9bad3 1759 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1760
10ef9ab4 1761 napi_gro_frags(napi);
2e588f84
SP
1762}
1763
10ef9ab4
SP
1764static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1765 struct be_rx_compl_info *rxcp)
2e588f84 1766{
c3c18bc1
SP
1767 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1768 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1769 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1770 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1771 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1772 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1773 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1774 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1775 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1776 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1777 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1778 if (rxcp->vlanf) {
c3c18bc1
SP
1779 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1780 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1781 }
c3c18bc1 1782 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1783 rxcp->tunneled =
c3c18bc1 1784 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1785}
1786
10ef9ab4
SP
1787static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1788 struct be_rx_compl_info *rxcp)
2e588f84 1789{
c3c18bc1
SP
1790 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1791 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1792 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1793 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1794 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1795 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1796 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1797 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1798 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1799 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1800 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1801 if (rxcp->vlanf) {
c3c18bc1
SP
1802 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1803 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1804 }
c3c18bc1
SP
1805 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1806 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1807}
1808
1809static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1810{
1811 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1812 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1813 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1814
2e588f84
SP
 1815 /* For checking the valid bit, it is OK to use either definition as the
 1816 * valid bit is at the same position in both v0 and v1 Rx compls */
1817 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1818 return NULL;
6b7c5b94 1819
2e588f84
SP
1820 rmb();
1821 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1822
2e588f84 1823 if (adapter->be3_native)
10ef9ab4 1824 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1825 else
10ef9ab4 1826 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1827
e38b1706
SK
1828 if (rxcp->ip_frag)
1829 rxcp->l4_csum = 0;
1830
15d72184 1831 if (rxcp->vlanf) {
f93f160b
VV
1832 /* In QNQ modes, if qnq bit is not set, then the packet was
1833 * tagged only with the transparent outer vlan-tag and must
1834 * not be treated as a vlan packet by host
1835 */
1836 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1837 rxcp->vlanf = 0;
6b7c5b94 1838
15d72184 1839 if (!lancer_chip(adapter))
3c709f8f 1840 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1841
939cf306 1842 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1843 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1844 rxcp->vlanf = 0;
1845 }
2e588f84
SP
1846
 1847 /* As the compl has been parsed, reset it; we won't touch it again */
1848 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1849
3abcdeda 1850 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1851 return rxcp;
1852}
1853
1829b086 1854static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1855{
6b7c5b94 1856 u32 order = get_order(size);
1829b086 1857
6b7c5b94 1858 if (order > 0)
1829b086
ED
1859 gfp |= __GFP_COMP;
1860 return alloc_pages(gfp, order);
6b7c5b94
SP
1861}
1862
1863/*
1864 * Allocate a page, split it to fragments of size rx_frag_size and post as
1865 * receive buffers to BE
1866 */
c30d7266 1867static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1868{
3abcdeda 1869 struct be_adapter *adapter = rxo->adapter;
26d92f92 1870 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1871 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1872 struct page *pagep = NULL;
ba42fad0 1873 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1874 struct be_eth_rx_d *rxd;
1875 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1876 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1877
3abcdeda 1878 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1879 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1880 if (!pagep) {
1829b086 1881 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1882 if (unlikely(!pagep)) {
ac124ff9 1883 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1884 break;
1885 }
ba42fad0
IV
1886 page_dmaaddr = dma_map_page(dev, pagep, 0,
1887 adapter->big_page_size,
2b7bcebf 1888 DMA_FROM_DEVICE);
ba42fad0
IV
1889 if (dma_mapping_error(dev, page_dmaaddr)) {
1890 put_page(pagep);
1891 pagep = NULL;
d3de1540 1892 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1893 break;
1894 }
e50287be 1895 page_offset = 0;
6b7c5b94
SP
1896 } else {
1897 get_page(pagep);
e50287be 1898 page_offset += rx_frag_size;
6b7c5b94 1899 }
e50287be 1900 page_info->page_offset = page_offset;
6b7c5b94 1901 page_info->page = pagep;
6b7c5b94
SP
1902
1903 rxd = queue_head_node(rxq);
e50287be 1904 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1905 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1906 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1907
1908 /* Any space left in the current big page for another frag? */
1909 if ((page_offset + rx_frag_size + rx_frag_size) >
1910 adapter->big_page_size) {
1911 pagep = NULL;
e50287be
SP
1912 page_info->last_frag = true;
1913 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1914 } else {
1915 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1916 }
26d92f92
SP
1917
1918 prev_page_info = page_info;
1919 queue_head_inc(rxq);
10ef9ab4 1920 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1921 }
e50287be
SP
1922
1923 /* Mark the last frag of a page when we break out of the above loop
1924 * with no more slots available in the RXQ
1925 */
1926 if (pagep) {
1927 prev_page_info->last_frag = true;
1928 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1929 }
6b7c5b94
SP
1930
1931 if (posted) {
6b7c5b94 1932 atomic_add(posted, &rxq->used);
6384a4d0
SP
1933 if (rxo->rx_post_starved)
1934 rxo->rx_post_starved = false;
c30d7266
AK
1935 do {
1936 notify = min(256u, posted);
1937 be_rxq_notify(adapter, rxq->id, notify);
1938 posted -= notify;
1939 } while (posted);
ea1dae11
SP
1940 } else if (atomic_read(&rxq->used) == 0) {
1941 /* Let be_worker replenish when memory is available */
3abcdeda 1942 rxo->rx_post_starved = true;
6b7c5b94 1943 }
6b7c5b94
SP
1944}
1945
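/* Posting math for be_post_rx_frags(), assuming 4K pages and the
 * default rx_frag_size of 2048: big_page_size works out to
 * (1 << get_order(2048)) * PAGE_SIZE = 4096, so each page yields two
 * RX frags sharing one DMA mapping. The doorbell is rung in chunks of
 * at most 256 frags, presumably matching the width of the notify
 * register's num-posted field.
 */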
5fb379ee 1946static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1947{
6b7c5b94
SP
1948 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1949
1950 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1951 return NULL;
1952
f3eb62d2 1953 rmb();
6b7c5b94
SP
1954 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1955
1956 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1957
1958 queue_tail_inc(tx_cq);
1959 return txcp;
1960}
1961
3c8def97 1962static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1963 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1964{
5f07b3c5 1965 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 1966 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
1967 u16 frag_index, num_wrbs = 0;
1968 struct sk_buff *skb = NULL;
1969 bool unmap_skb_hdr = false;
a73b796e 1970 struct be_eth_wrb *wrb;
6b7c5b94 1971
ec43b1a6 1972 do {
5f07b3c5
SP
1973 if (sent_skbs[txq->tail]) {
1974 /* Free skb from prev req */
1975 if (skb)
1976 dev_consume_skb_any(skb);
1977 skb = sent_skbs[txq->tail];
1978 sent_skbs[txq->tail] = NULL;
1979 queue_tail_inc(txq); /* skip hdr wrb */
1980 num_wrbs++;
1981 unmap_skb_hdr = true;
1982 }
a73b796e 1983 wrb = queue_tail_node(txq);
5f07b3c5 1984 frag_index = txq->tail;
2b7bcebf 1985 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 1986 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 1987 unmap_skb_hdr = false;
6b7c5b94 1988 queue_tail_inc(txq);
5f07b3c5
SP
1989 num_wrbs++;
1990 } while (frag_index != last_index);
1991 dev_consume_skb_any(skb);
6b7c5b94 1992
4d586b82 1993 return num_wrbs;
6b7c5b94
SP
1994}
1995
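/* TX reclaim walk, informally: each transmitted skb occupies a header
 * WRB (at whose index the skb pointer is parked in sent_skbs[])
 * followed by its data WRBs. be_tx_compl_process() starts at
 * txq->tail, skips the header WRB, unmaps every data WRB and repeats
 * until the WRB at last_index is consumed; one completion can thus
 * retire several packets, each skb being freed when the next header
 * WRB (or the end of the range) is reached.
 */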
10ef9ab4
SP
1996/* Return the number of events in the event queue */
1997static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1998{
10ef9ab4
SP
1999 struct be_eq_entry *eqe;
2000 int num = 0;
859b1e4e 2001
10ef9ab4
SP
2002 do {
2003 eqe = queue_tail_node(&eqo->q);
2004 if (eqe->evt == 0)
2005 break;
859b1e4e 2006
10ef9ab4
SP
2007 rmb();
2008 eqe->evt = 0;
2009 num++;
2010 queue_tail_inc(&eqo->q);
2011 } while (true);
2012
2013 return num;
859b1e4e
SP
2014}
2015
10ef9ab4
SP
 2016/* Leaves the EQ in disarmed state */
2017static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2018{
10ef9ab4 2019 int num = events_get(eqo);
859b1e4e 2020
10ef9ab4 2021 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2022}
2023
10ef9ab4 2024static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2025{
2026 struct be_rx_page_info *page_info;
3abcdeda
SP
2027 struct be_queue_info *rxq = &rxo->q;
2028 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2029 struct be_rx_compl_info *rxcp;
d23e946c
SP
2030 struct be_adapter *adapter = rxo->adapter;
2031 int flush_wait = 0;
6b7c5b94 2032
d23e946c
SP
2033 /* Consume pending rx completions.
2034 * Wait for the flush completion (identified by zero num_rcvd)
2035 * to arrive. Notify CQ even when there are no more CQ entries
2036 * for HW to flush partially coalesced CQ entries.
2037 * In Lancer, there is no need to wait for flush compl.
2038 */
2039 for (;;) {
2040 rxcp = be_rx_compl_get(rxo);
ddf1169f 2041 if (!rxcp) {
d23e946c
SP
2042 if (lancer_chip(adapter))
2043 break;
2044
2045 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2046 dev_warn(&adapter->pdev->dev,
2047 "did not receive flush compl\n");
2048 break;
2049 }
2050 be_cq_notify(adapter, rx_cq->id, true, 0);
2051 mdelay(1);
2052 } else {
2053 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2054 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2055 if (rxcp->num_rcvd == 0)
2056 break;
2057 }
6b7c5b94
SP
2058 }
2059
d23e946c
SP
2060 /* After cleanup, leave the CQ in unarmed state */
2061 be_cq_notify(adapter, rx_cq->id, false, 0);
2062
2063 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2064 while (atomic_read(&rxq->used) > 0) {
2065 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2066 put_page(page_info->page);
2067 memset(page_info, 0, sizeof(*page_info));
2068 }
2069 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2070 rxq->tail = 0;
2071 rxq->head = 0;
6b7c5b94
SP
2072}
2073
0ae57bb3 2074static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2075{
5f07b3c5
SP
2076 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2077 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2078 struct be_tx_obj *txo;
2079 struct be_queue_info *txq;
a8e9179a 2080 struct be_eth_tx_compl *txcp;
0ae57bb3 2081 int i, pending_txqs;
a8e9179a 2082
1a3d0717 2083 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2084 do {
0ae57bb3
SP
2085 pending_txqs = adapter->num_tx_qs;
2086
2087 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2088 cmpl = 0;
2089 num_wrbs = 0;
0ae57bb3
SP
2090 txq = &txo->q;
2091 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2092 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2093 num_wrbs += be_tx_compl_process(adapter, txo,
2094 end_idx);
2095 cmpl++;
2096 }
2097 if (cmpl) {
2098 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2099 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2100 timeo = 0;
0ae57bb3 2101 }
5f07b3c5 2102 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2103 pending_txqs--;
a8e9179a
SP
2104 }
2105
1a3d0717 2106 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2107 break;
2108
2109 mdelay(1);
2110 } while (true);
2111
5f07b3c5 2112 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2113 for_all_tx_queues(adapter, txo, i) {
2114 txq = &txo->q;
0ae57bb3 2115
5f07b3c5
SP
2116 if (atomic_read(&txq->used)) {
2117 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2118 i, atomic_read(&txq->used));
2119 notified_idx = txq->tail;
0ae57bb3 2120 end_idx = txq->tail;
5f07b3c5
SP
2121 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2122 txq->len);
2123 /* Use the tx-compl process logic to handle requests
2124 * that were not sent to the HW.
2125 */
0ae57bb3
SP
2126 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2127 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2128 BUG_ON(atomic_read(&txq->used));
2129 txo->pend_wrb_cnt = 0;
2130 /* Since hw was never notified of these requests,
2131 * reset TXQ indices
2132 */
2133 txq->head = notified_idx;
2134 txq->tail = notified_idx;
0ae57bb3 2135 }
b03388d6 2136 }
6b7c5b94
SP
2137}
2138
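/* Shutdown draining above, in short: the polling loop treats a TXQ as
 * drained once txq->used equals pend_wrb_cnt (WRBs written but never
 * door-belled) and gives up after ~10ms of HW silence. Leftover
 * requests are then reclaimed through the same completion path and
 * head/tail are rewound to notified_idx, since HW never saw them.
 */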
10ef9ab4
SP
2139static void be_evt_queues_destroy(struct be_adapter *adapter)
2140{
2141 struct be_eq_obj *eqo;
2142 int i;
2143
2144 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2145 if (eqo->q.created) {
2146 be_eq_clean(eqo);
10ef9ab4 2147 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2148 napi_hash_del(&eqo->napi);
68d7bdcb 2149 netif_napi_del(&eqo->napi);
19d59aa7 2150 }
10ef9ab4
SP
2151 be_queue_free(adapter, &eqo->q);
2152 }
2153}
2154
2155static int be_evt_queues_create(struct be_adapter *adapter)
2156{
2157 struct be_queue_info *eq;
2158 struct be_eq_obj *eqo;
2632bafd 2159 struct be_aic_obj *aic;
10ef9ab4
SP
2160 int i, rc;
2161
92bf14ab
SP
2162 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2163 adapter->cfg_num_qs);
10ef9ab4
SP
2164
2165 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2166 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2167 BE_NAPI_WEIGHT);
6384a4d0 2168 napi_hash_add(&eqo->napi);
2632bafd 2169 aic = &adapter->aic_obj[i];
10ef9ab4 2170 eqo->adapter = adapter;
10ef9ab4 2171 eqo->idx = i;
2632bafd
SP
2172 aic->max_eqd = BE_MAX_EQD;
2173 aic->enable = true;
10ef9ab4
SP
2174
2175 eq = &eqo->q;
2176 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2177 sizeof(struct be_eq_entry));
10ef9ab4
SP
2178 if (rc)
2179 return rc;
2180
f2f781a7 2181 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2182 if (rc)
2183 return rc;
2184 }
1cfafab9 2185 return 0;
10ef9ab4
SP
2186}
2187
5fb379ee
SP
2188static void be_mcc_queues_destroy(struct be_adapter *adapter)
2189{
2190 struct be_queue_info *q;
5fb379ee 2191
8788fdc2 2192 q = &adapter->mcc_obj.q;
5fb379ee 2193 if (q->created)
8788fdc2 2194 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2195 be_queue_free(adapter, q);
2196
8788fdc2 2197 q = &adapter->mcc_obj.cq;
5fb379ee 2198 if (q->created)
8788fdc2 2199 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2200 be_queue_free(adapter, q);
2201}
2202
2203/* Must be called only after TX qs are created as MCC shares TX EQ */
2204static int be_mcc_queues_create(struct be_adapter *adapter)
2205{
2206 struct be_queue_info *q, *cq;
5fb379ee 2207
8788fdc2 2208 cq = &adapter->mcc_obj.cq;
5fb379ee 2209 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2210 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2211 goto err;
2212
10ef9ab4
SP
2213 /* Use the default EQ for MCC completions */
2214 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2215 goto mcc_cq_free;
2216
8788fdc2 2217 q = &adapter->mcc_obj.q;
5fb379ee
SP
2218 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2219 goto mcc_cq_destroy;
2220
8788fdc2 2221 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2222 goto mcc_q_free;
2223
2224 return 0;
2225
2226mcc_q_free:
2227 be_queue_free(adapter, q);
2228mcc_cq_destroy:
8788fdc2 2229 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2230mcc_cq_free:
2231 be_queue_free(adapter, cq);
2232err:
2233 return -1;
2234}
2235
6b7c5b94
SP
2236static void be_tx_queues_destroy(struct be_adapter *adapter)
2237{
2238 struct be_queue_info *q;
3c8def97
SP
2239 struct be_tx_obj *txo;
2240 u8 i;
6b7c5b94 2241
3c8def97
SP
2242 for_all_tx_queues(adapter, txo, i) {
2243 q = &txo->q;
2244 if (q->created)
2245 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2246 be_queue_free(adapter, q);
6b7c5b94 2247
3c8def97
SP
2248 q = &txo->cq;
2249 if (q->created)
2250 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2251 be_queue_free(adapter, q);
2252 }
6b7c5b94
SP
2253}
2254
7707133c 2255static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2256{
10ef9ab4 2257 struct be_queue_info *cq, *eq;
3c8def97 2258 struct be_tx_obj *txo;
92bf14ab 2259 int status, i;
6b7c5b94 2260
92bf14ab 2261 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2262
10ef9ab4
SP
2263 for_all_tx_queues(adapter, txo, i) {
2264 cq = &txo->cq;
2265 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2266 sizeof(struct be_eth_tx_compl));
2267 if (status)
2268 return status;
3c8def97 2269
827da44c
JS
2270 u64_stats_init(&txo->stats.sync);
2271 u64_stats_init(&txo->stats.sync_compl);
2272
10ef9ab4
SP
2273 /* If num_evt_qs is less than num_tx_qs, then more than
 2274 * one txq shares an eq
2275 */
2276 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2277 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2278 if (status)
2279 return status;
6b7c5b94 2280
10ef9ab4
SP
2281 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2282 sizeof(struct be_eth_wrb));
2283 if (status)
2284 return status;
6b7c5b94 2285
94d73aaa 2286 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2287 if (status)
2288 return status;
3c8def97 2289 }
6b7c5b94 2290
d379142b
SP
2291 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2292 adapter->num_tx_qs);
10ef9ab4 2293 return 0;
6b7c5b94
SP
2294}
2295
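/* Queue-to-EQ fan-out above, by example: with num_tx_qs = 8 and
 * num_evt_qs = 4, the i % num_evt_qs mapping places txq0/txq4 on EQ0,
 * txq1/txq5 on EQ1, and so on, spreading TX completions evenly across
 * the event queues (and hence the MSI-X vectors).
 */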
10ef9ab4 2296static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2297{
2298 struct be_queue_info *q;
3abcdeda
SP
2299 struct be_rx_obj *rxo;
2300 int i;
2301
2302 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2303 q = &rxo->cq;
2304 if (q->created)
2305 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2306 be_queue_free(adapter, q);
ac6a0c4a
SP
2307 }
2308}
2309
10ef9ab4 2310static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2311{
10ef9ab4 2312 struct be_queue_info *eq, *cq;
3abcdeda
SP
2313 struct be_rx_obj *rxo;
2314 int rc, i;
6b7c5b94 2315
92bf14ab
SP
2316 /* We can create as many RSS rings as there are EQs. */
2317 adapter->num_rx_qs = adapter->num_evt_qs;
2318
 2319 /* We'll use RSS only if at least 2 RSS rings are supported.
2320 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2321 */
92bf14ab
SP
2322 if (adapter->num_rx_qs > 1)
2323 adapter->num_rx_qs++;
2324
6b7c5b94 2325 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2326 for_all_rx_queues(adapter, rxo, i) {
2327 rxo->adapter = adapter;
3abcdeda
SP
2328 cq = &rxo->cq;
2329 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2330 sizeof(struct be_eth_rx_compl));
3abcdeda 2331 if (rc)
10ef9ab4 2332 return rc;
3abcdeda 2333
827da44c 2334 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2335 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2336 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2337 if (rc)
10ef9ab4 2338 return rc;
3abcdeda 2339 }
6b7c5b94 2340
d379142b
SP
2341 dev_info(&adapter->pdev->dev,
2342 "created %d RSS queue(s) and 1 default RX queue\n",
2343 adapter->num_rx_qs - 1);
10ef9ab4 2344 return 0;
b628bde2
SP
2345}
2346
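/* RX queue sizing above, by example: with 4 event queues this creates
 * 4 RSS rings plus the default (non-RSS) RXQ for non-IP traffic, i.e.
 * num_rx_qs = 5; with a single event queue no extra ring is added and
 * only the default RXQ is used, leaving RSS disabled.
 */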
6b7c5b94
SP
2347static irqreturn_t be_intx(int irq, void *dev)
2348{
e49cc34f
SP
2349 struct be_eq_obj *eqo = dev;
2350 struct be_adapter *adapter = eqo->adapter;
2351 int num_evts = 0;
6b7c5b94 2352
d0b9cec3
SP
2353 /* IRQ is not expected when NAPI is scheduled as the EQ
2354 * will not be armed.
2355 * But, this can happen on Lancer INTx where it takes
 2356 * a while to de-assert INTx, or in BE2 where occasionally
2357 * an interrupt may be raised even when EQ is unarmed.
2358 * If NAPI is already scheduled, then counting & notifying
2359 * events will orphan them.
e49cc34f 2360 */
d0b9cec3 2361 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2362 num_evts = events_get(eqo);
d0b9cec3
SP
2363 __napi_schedule(&eqo->napi);
2364 if (num_evts)
2365 eqo->spurious_intr = 0;
2366 }
2367 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2368
d0b9cec3
SP
 2369 /* Return IRQ_HANDLED only for the first spurious intr
2370 * after a valid intr to stop the kernel from branding
2371 * this irq as a bad one!
e49cc34f 2372 */
d0b9cec3
SP
2373 if (num_evts || eqo->spurious_intr++ == 0)
2374 return IRQ_HANDLED;
2375 else
2376 return IRQ_NONE;
6b7c5b94
SP
2377}
2378
10ef9ab4 2379static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2380{
10ef9ab4 2381 struct be_eq_obj *eqo = dev;
6b7c5b94 2382
0b545a62
SP
2383 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2384 napi_schedule(&eqo->napi);
6b7c5b94
SP
2385 return IRQ_HANDLED;
2386}
2387
2e588f84 2388static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2389{
e38b1706 2390 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2391}
2392
10ef9ab4 2393static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2394 int budget, int polling)
6b7c5b94 2395{
3abcdeda
SP
2396 struct be_adapter *adapter = rxo->adapter;
2397 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2398 struct be_rx_compl_info *rxcp;
6b7c5b94 2399 u32 work_done;
c30d7266 2400 u32 frags_consumed = 0;
6b7c5b94
SP
2401
2402 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2403 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2404 if (!rxcp)
2405 break;
2406
12004ae9
SP
2407 /* Is it a flush compl that has no data */
2408 if (unlikely(rxcp->num_rcvd == 0))
2409 goto loop_continue;
2410
2411 /* Discard compl with partial DMA Lancer B0 */
2412 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2413 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2414 goto loop_continue;
2415 }
2416
2417 /* On BE drop pkts that arrive due to imperfect filtering in
 2418 * promiscuous mode on some SKUs
2419 */
2420 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2421 !lancer_chip(adapter))) {
10ef9ab4 2422 be_rx_compl_discard(rxo, rxcp);
12004ae9 2423 goto loop_continue;
64642811 2424 }
009dd872 2425
6384a4d0
SP
2426 /* Don't do gro when we're busy_polling */
2427 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2428 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2429 else
6384a4d0
SP
2430 be_rx_compl_process(rxo, napi, rxcp);
2431
12004ae9 2432loop_continue:
c30d7266 2433 frags_consumed += rxcp->num_rcvd;
2e588f84 2434 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2435 }
2436
10ef9ab4
SP
2437 if (work_done) {
2438 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2439
6384a4d0
SP
2440 /* When an rx-obj gets into post_starved state, just
2441 * let be_worker do the posting.
2442 */
2443 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2444 !rxo->rx_post_starved)
c30d7266
AK
2445 be_post_rx_frags(rxo, GFP_ATOMIC,
2446 max_t(u32, MAX_RX_POST,
2447 frags_consumed));
6b7c5b94 2448 }
10ef9ab4 2449
6b7c5b94
SP
2450 return work_done;
2451}
2452
512bb8a2
KA
2453static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2454{
2455 switch (status) {
2456 case BE_TX_COMP_HDR_PARSE_ERR:
2457 tx_stats(txo)->tx_hdr_parse_err++;
2458 break;
2459 case BE_TX_COMP_NDMA_ERR:
2460 tx_stats(txo)->tx_dma_err++;
2461 break;
2462 case BE_TX_COMP_ACL_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 }
2466}
2467
2468static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2469{
2470 switch (status) {
2471 case LANCER_TX_COMP_LSO_ERR:
2472 tx_stats(txo)->tx_tso_err++;
2473 break;
2474 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2475 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2476 tx_stats(txo)->tx_spoof_check_err++;
2477 break;
2478 case LANCER_TX_COMP_QINQ_ERR:
2479 tx_stats(txo)->tx_qinq_err++;
2480 break;
2481 case LANCER_TX_COMP_PARITY_ERR:
2482 tx_stats(txo)->tx_internal_parity_err++;
2483 break;
2484 case LANCER_TX_COMP_DMA_ERR:
2485 tx_stats(txo)->tx_dma_err++;
2486 break;
2487 }
2488}
2489
c8f64615
SP
2490static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2491 int idx)
6b7c5b94 2492{
6b7c5b94 2493 struct be_eth_tx_compl *txcp;
c8f64615 2494 int num_wrbs = 0, work_done = 0;
512bb8a2 2495 u32 compl_status;
c8f64615
SP
2496 u16 last_idx;
2497
2498 while ((txcp = be_tx_compl_get(&txo->cq))) {
2499 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2500 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2501 work_done++;
3c8def97 2502
512bb8a2
KA
2503 compl_status = GET_TX_COMPL_BITS(status, txcp);
2504 if (compl_status) {
2505 if (lancer_chip(adapter))
2506 lancer_update_tx_err(txo, compl_status);
2507 else
2508 be_update_tx_err(txo, compl_status);
2509 }
10ef9ab4 2510 }
6b7c5b94 2511
10ef9ab4
SP
2512 if (work_done) {
2513 be_cq_notify(adapter, txo->cq.id, true, work_done);
2514 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2515
10ef9ab4
SP
2516 /* As Tx wrbs have been freed up, wake up netdev queue
2517 * if it was stopped due to lack of tx wrbs. */
2518 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2519 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2520 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2521 }
10ef9ab4
SP
2522
2523 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2524 tx_stats(txo)->tx_compl += work_done;
2525 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2526 }
10ef9ab4 2527}
6b7c5b94 2528
68d7bdcb 2529int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2530{
2531 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2532 struct be_adapter *adapter = eqo->adapter;
0b545a62 2533 int max_work = 0, work, i, num_evts;
6384a4d0 2534 struct be_rx_obj *rxo;
a4906ea0 2535 struct be_tx_obj *txo;
f31e50a8 2536
0b545a62
SP
2537 num_evts = events_get(eqo);
2538
a4906ea0
SP
2539 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2540 be_process_tx(adapter, txo, i);
f31e50a8 2541
6384a4d0
SP
2542 if (be_lock_napi(eqo)) {
2543 /* This loop will iterate twice for EQ0 in which
 2544 * completions of the last RXQ (default one) are also processed.
 2545 * For other EQs the loop iterates only once.
2546 */
2547 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2548 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2549 max_work = max(work, max_work);
2550 }
2551 be_unlock_napi(eqo);
2552 } else {
2553 max_work = budget;
10ef9ab4 2554 }
6b7c5b94 2555
10ef9ab4
SP
2556 if (is_mcc_eqo(eqo))
2557 be_process_mcc(adapter);
93c86700 2558
10ef9ab4
SP
2559 if (max_work < budget) {
2560 napi_complete(napi);
0b545a62 2561 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2562 } else {
2563 /* As we'll continue in polling mode, count and clear events */
0b545a62 2564 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2565 }
10ef9ab4 2566 return max_work;
6b7c5b94
SP
2567}
2568
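/* EQ arming contract for be_poll(), restated: when the budget is not
 * exhausted, NAPI completes and be_eq_notify() is called with rearm
 * set so the next event raises an interrupt; when polling continues,
 * the same call is made with rearm clear purely to ack the num_evts
 * entries already consumed, keeping the EQ unarmed.
 */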
6384a4d0
SP
2569#ifdef CONFIG_NET_RX_BUSY_POLL
2570static int be_busy_poll(struct napi_struct *napi)
2571{
2572 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2573 struct be_adapter *adapter = eqo->adapter;
2574 struct be_rx_obj *rxo;
2575 int i, work = 0;
2576
2577 if (!be_lock_busy_poll(eqo))
2578 return LL_FLUSH_BUSY;
2579
2580 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2581 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2582 if (work)
2583 break;
2584 }
2585
2586 be_unlock_busy_poll(eqo);
2587 return work;
2588}
2589#endif
2590
f67ef7ba 2591void be_detect_error(struct be_adapter *adapter)
7c185276 2592{
e1cfb67a
PR
2593 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2594 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2595 u32 i;
eb0eecc1
SK
2596 bool error_detected = false;
2597 struct device *dev = &adapter->pdev->dev;
2598 struct net_device *netdev = adapter->netdev;
7c185276 2599
d23e946c 2600 if (be_hw_error(adapter))
72f02485
SP
2601 return;
2602
e1cfb67a
PR
2603 if (lancer_chip(adapter)) {
2604 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2605 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2606 sliport_err1 = ioread32(adapter->db +
748b539a 2607 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2608 sliport_err2 = ioread32(adapter->db +
748b539a 2609 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2610 adapter->hw_error = true;
2611 /* Do not log error messages if its a FW reset */
2612 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2613 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2614 dev_info(dev, "Firmware update in progress\n");
2615 } else {
2616 error_detected = true;
2617 dev_err(dev, "Error detected in the card\n");
2618 dev_err(dev, "ERR: sliport status 0x%x\n",
2619 sliport_status);
2620 dev_err(dev, "ERR: sliport error1 0x%x\n",
2621 sliport_err1);
2622 dev_err(dev, "ERR: sliport error2 0x%x\n",
2623 sliport_err2);
2624 }
e1cfb67a
PR
2625 }
2626 } else {
2627 pci_read_config_dword(adapter->pdev,
748b539a 2628 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2629 pci_read_config_dword(adapter->pdev,
748b539a 2630 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2631 pci_read_config_dword(adapter->pdev,
748b539a 2632 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2633 pci_read_config_dword(adapter->pdev,
748b539a 2634 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2635
f67ef7ba
PR
2636 ue_lo = (ue_lo & ~ue_lo_mask);
2637 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2638
eb0eecc1
SK
2639 /* On certain platforms BE hardware can indicate spurious UEs.
2640 * Allow HW to stop working completely in case of a real UE.
 2641 * Hence hw_error is not set merely on UE detection.
2642 */
f67ef7ba 2643
eb0eecc1
SK
2644 if (ue_lo || ue_hi) {
2645 error_detected = true;
2646 dev_err(dev,
2647 "Unrecoverable Error detected in the adapter");
2648 dev_err(dev, "Please reboot server to recover");
2649 if (skyhawk_chip(adapter))
2650 adapter->hw_error = true;
2651 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2652 if (ue_lo & 1)
2653 dev_err(dev, "UE: %s bit set\n",
2654 ue_status_low_desc[i]);
2655 }
2656 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2657 if (ue_hi & 1)
2658 dev_err(dev, "UE: %s bit set\n",
2659 ue_status_hi_desc[i]);
2660 }
7c185276
AK
2661 }
2662 }
eb0eecc1
SK
2663 if (error_detected)
2664 netif_carrier_off(netdev);
7c185276
AK
2665}
2666
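/* UE decoding above, in short: the mask registers flag bits the
 * platform declares expected, so ue_lo &= ~ue_lo_mask leaves only
 * genuine errors; each surviving bit position n then indexes
 * ue_status_low_desc[n] (or ue_status_hi_desc[n] for ue_hi) to name
 * the failing block in the log.
 */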
8d56ff11
SP
2667static void be_msix_disable(struct be_adapter *adapter)
2668{
ac6a0c4a 2669 if (msix_enabled(adapter)) {
8d56ff11 2670 pci_disable_msix(adapter->pdev);
ac6a0c4a 2671 adapter->num_msix_vec = 0;
68d7bdcb 2672 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2673 }
2674}
2675
c2bba3df 2676static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2677{
7dc4c064 2678 int i, num_vec;
d379142b 2679 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2680
92bf14ab
SP
2681 /* If RoCE is supported, program the max number of NIC vectors that
2682 * may be configured via set-channels, along with vectors needed for
 2683 * RoCE. Else, just program the number we'll use initially.
2684 */
2685 if (be_roce_supported(adapter))
2686 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2687 2 * num_online_cpus());
2688 else
2689 num_vec = adapter->cfg_num_qs;
3abcdeda 2690
ac6a0c4a 2691 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2692 adapter->msix_entries[i].entry = i;
2693
7dc4c064
AG
2694 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2695 MIN_MSIX_VECTORS, num_vec);
2696 if (num_vec < 0)
2697 goto fail;
92bf14ab 2698
92bf14ab
SP
2699 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2700 adapter->num_msix_roce_vec = num_vec / 2;
2701 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2702 adapter->num_msix_roce_vec);
2703 }
2704
2705 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2706
2707 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2708 adapter->num_msix_vec);
c2bba3df 2709 return 0;
7dc4c064
AG
2710
2711fail:
2712 dev_warn(dev, "MSIx enable failed\n");
2713
2714 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2715 if (!be_physfn(adapter))
2716 return num_vec;
2717 return 0;
6b7c5b94
SP
2718}
2719
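/* Vector budgeting example for be_msix_enable(): on a 16-CPU host
 * with be_max_eqs() == 8 and RoCE supported, num_vec = min(2 * 8,
 * 2 * 16) = 16 vectors are requested; if the range allocation grants
 * all 16, half are reserved for RoCE and the remaining 8 become the
 * NIC's event-queue vectors.
 */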
fe6d2a38 2720static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2721 struct be_eq_obj *eqo)
b628bde2 2722{
f2f781a7 2723 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2724}
6b7c5b94 2725
b628bde2
SP
2726static int be_msix_register(struct be_adapter *adapter)
2727{
10ef9ab4
SP
2728 struct net_device *netdev = adapter->netdev;
2729 struct be_eq_obj *eqo;
2730 int status, i, vec;
6b7c5b94 2731
10ef9ab4
SP
2732 for_all_evt_queues(adapter, eqo, i) {
2733 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2734 vec = be_msix_vec_get(adapter, eqo);
2735 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2736 if (status)
2737 goto err_msix;
2738 }
b628bde2 2739
6b7c5b94 2740 return 0;
3abcdeda 2741err_msix:
10ef9ab4
SP
2742 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2743 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2744 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2745 status);
ac6a0c4a 2746 be_msix_disable(adapter);
6b7c5b94
SP
2747 return status;
2748}
2749
2750static int be_irq_register(struct be_adapter *adapter)
2751{
2752 struct net_device *netdev = adapter->netdev;
2753 int status;
2754
ac6a0c4a 2755 if (msix_enabled(adapter)) {
6b7c5b94
SP
2756 status = be_msix_register(adapter);
2757 if (status == 0)
2758 goto done;
ba343c77
SB
2759 /* INTx is not supported for VF */
2760 if (!be_physfn(adapter))
2761 return status;
6b7c5b94
SP
2762 }
2763
e49cc34f 2764 /* INTx: only the first EQ is used */
6b7c5b94
SP
2765 netdev->irq = adapter->pdev->irq;
2766 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2767 &adapter->eq_obj[0]);
6b7c5b94
SP
2768 if (status) {
2769 dev_err(&adapter->pdev->dev,
2770 "INTx request IRQ failed - err %d\n", status);
2771 return status;
2772 }
2773done:
2774 adapter->isr_registered = true;
2775 return 0;
2776}
2777
2778static void be_irq_unregister(struct be_adapter *adapter)
2779{
2780 struct net_device *netdev = adapter->netdev;
10ef9ab4 2781 struct be_eq_obj *eqo;
3abcdeda 2782 int i;
6b7c5b94
SP
2783
2784 if (!adapter->isr_registered)
2785 return;
2786
2787 /* INTx */
ac6a0c4a 2788 if (!msix_enabled(adapter)) {
e49cc34f 2789 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2790 goto done;
2791 }
2792
2793 /* MSIx */
10ef9ab4
SP
2794 for_all_evt_queues(adapter, eqo, i)
2795 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2796
6b7c5b94
SP
2797done:
2798 adapter->isr_registered = false;
6b7c5b94
SP
2799}
2800
10ef9ab4 2801static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2802{
2803 struct be_queue_info *q;
2804 struct be_rx_obj *rxo;
2805 int i;
2806
2807 for_all_rx_queues(adapter, rxo, i) {
2808 q = &rxo->q;
2809 if (q->created) {
2810 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2811 be_rx_cq_clean(rxo);
482c9e79 2812 }
10ef9ab4 2813 be_queue_free(adapter, q);
482c9e79
SP
2814 }
2815}
2816
889cd4b2
SP
2817static int be_close(struct net_device *netdev)
2818{
2819 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2820 struct be_eq_obj *eqo;
2821 int i;
889cd4b2 2822
e1ad8e33
KA
2823 /* This protection is needed as be_close() may be called even when the
 2824 * adapter is in a cleared state (after an EEH permanent failure)
2825 */
2826 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2827 return 0;
2828
045508a8
PP
2829 be_roce_dev_close(adapter);
2830
dff345c5
IV
2831 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2832 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2833 napi_disable(&eqo->napi);
6384a4d0
SP
2834 be_disable_busy_poll(eqo);
2835 }
71237b6f 2836 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2837 }
a323d9bf
SP
2838
2839 be_async_mcc_disable(adapter);
2840
2841 /* Wait for all pending tx completions to arrive so that
2842 * all tx skbs are freed.
2843 */
fba87559 2844 netif_tx_disable(netdev);
6e1f9975 2845 be_tx_compl_clean(adapter);
a323d9bf
SP
2846
2847 be_rx_qs_destroy(adapter);
2848
d11a347d
AK
2849 for (i = 1; i < (adapter->uc_macs + 1); i++)
2850 be_cmd_pmac_del(adapter, adapter->if_handle,
2851 adapter->pmac_id[i], 0);
2852 adapter->uc_macs = 0;
2853
a323d9bf 2854 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2855 if (msix_enabled(adapter))
2856 synchronize_irq(be_msix_vec_get(adapter, eqo));
2857 else
2858 synchronize_irq(netdev->irq);
2859 be_eq_clean(eqo);
63fcb27f
PR
2860 }
2861
889cd4b2
SP
2862 be_irq_unregister(adapter);
2863
482c9e79
SP
2864 return 0;
2865}
2866
10ef9ab4 2867static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 2868{
1dcf7b1c
ED
2869 struct rss_info *rss = &adapter->rss_info;
2870 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 2871 struct be_rx_obj *rxo;
e9008ee9 2872 int rc, i, j;
482c9e79
SP
2873
2874 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2875 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2876 sizeof(struct be_eth_rx_d));
2877 if (rc)
2878 return rc;
2879 }
2880
2881 /* The FW would like the default RXQ to be created first */
2882 rxo = default_rxo(adapter);
2883 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2884 adapter->if_handle, false, &rxo->rss_id);
2885 if (rc)
2886 return rc;
2887
2888 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2889 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2890 rx_frag_size, adapter->if_handle,
2891 true, &rxo->rss_id);
482c9e79
SP
2892 if (rc)
2893 return rc;
2894 }
2895
2896 if (be_multi_rxq(adapter)) {
e2557877
VD
2897 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2898 j += adapter->num_rx_qs - 1) {
e9008ee9 2899 for_all_rss_queues(adapter, rxo, i) {
e2557877 2900 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2901 break;
e2557877
VD
2902 rss->rsstable[j + i] = rxo->rss_id;
2903 rss->rss_queue[j + i] = i;
e9008ee9
PR
2904 }
2905 }
e2557877
VD
2906 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2907 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2908
2909 if (!BEx_chip(adapter))
e2557877
VD
2910 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2911 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2912 } else {
2913 /* Disable RSS, if only default RX Q is created */
e2557877 2914 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2915 }
594ad54a 2916
1dcf7b1c 2917 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 2918 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 2919 128, rss_key);
da1388d6 2920 if (rc) {
e2557877 2921 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2922 return rc;
482c9e79
SP
2923 }
2924
1dcf7b1c 2925 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 2926
482c9e79 2927 /* First time posting */
10ef9ab4 2928 for_all_rx_queues(adapter, rxo, i)
c30d7266 2929 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
2930 return 0;
2931}
2932
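/* Indirection-table fill above, by example: with 4 RSS rings
 * (num_rx_qs == 5) the outer loop advances j in strides of 4 while
 * the inner loop writes one entry per ring, so the 128-slot table
 * becomes ring0..ring3 repeated 32 times; an incoming flow's RSS hash
 * then indexes this table to select its ring.
 */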
6b7c5b94
SP
2933static int be_open(struct net_device *netdev)
2934{
2935 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2936 struct be_eq_obj *eqo;
3abcdeda 2937 struct be_rx_obj *rxo;
10ef9ab4 2938 struct be_tx_obj *txo;
b236916a 2939 u8 link_status;
3abcdeda 2940 int status, i;
5fb379ee 2941
10ef9ab4 2942 status = be_rx_qs_create(adapter);
482c9e79
SP
2943 if (status)
2944 goto err;
2945
c2bba3df
SK
2946 status = be_irq_register(adapter);
2947 if (status)
2948 goto err;
5fb379ee 2949
10ef9ab4 2950 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2951 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2952
10ef9ab4
SP
2953 for_all_tx_queues(adapter, txo, i)
2954 be_cq_notify(adapter, txo->cq.id, true, 0);
2955
7a1e9b20
SP
2956 be_async_mcc_enable(adapter);
2957
10ef9ab4
SP
2958 for_all_evt_queues(adapter, eqo, i) {
2959 napi_enable(&eqo->napi);
6384a4d0 2960 be_enable_busy_poll(eqo);
4cad9f3b 2961 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2962 }
04d3d624 2963 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2964
323ff71e 2965 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2966 if (!status)
2967 be_link_status_update(adapter, link_status);
2968
fba87559 2969 netif_tx_start_all_queues(netdev);
045508a8 2970 be_roce_dev_open(adapter);
c9c47142 2971
c5abe7c0 2972#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2973 if (skyhawk_chip(adapter))
2974 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2975#endif
2976
889cd4b2
SP
2977 return 0;
2978err:
2979 be_close(adapter->netdev);
2980 return -EIO;
5fb379ee
SP
2981}
2982
71d8d1b5
AK
2983static int be_setup_wol(struct be_adapter *adapter, bool enable)
2984{
2985 struct be_dma_mem cmd;
2986 int status = 0;
2987 u8 mac[ETH_ALEN];
2988
2989 memset(mac, 0, ETH_ALEN);
2990
2991 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2992 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2993 GFP_KERNEL);
ddf1169f 2994 if (!cmd.va)
6b568689 2995 return -ENOMEM;
71d8d1b5
AK
2996
2997 if (enable) {
2998 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2999 PCICFG_PM_CONTROL_OFFSET,
3000 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3001 if (status) {
3002 dev_err(&adapter->pdev->dev,
2381a55c 3003 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3004 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3005 cmd.dma);
71d8d1b5
AK
3006 return status;
3007 }
3008 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3009 adapter->netdev->dev_addr,
3010 &cmd);
71d8d1b5
AK
3011 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3012 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3013 } else {
3014 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3015 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3016 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3017 }
3018
2b7bcebf 3019 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3020 return status;
3021}
3022
6d87f5c3
AK
3023/*
3024 * Generate a seed MAC address from the PF MAC Address using jhash.
 3025 * MAC addresses for VFs are assigned incrementally starting from the seed.
3026 * These addresses are programmed in the ASIC by the PF and the VF driver
3027 * queries for the MAC address during its probe.
3028 */
4c876616 3029static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3030{
f9449ab7 3031 u32 vf;
3abcdeda 3032 int status = 0;
6d87f5c3 3033 u8 mac[ETH_ALEN];
11ac75ed 3034 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3035
3036 be_vf_eth_addr_generate(adapter, mac);
3037
11ac75ed 3038 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3039 if (BEx_chip(adapter))
590c391d 3040 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3041 vf_cfg->if_handle,
3042 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3043 else
3044 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3045 vf + 1);
590c391d 3046
6d87f5c3
AK
3047 if (status)
3048 dev_err(&adapter->pdev->dev,
748b539a
SP
3049 "Mac address assignment failed for VF %d\n",
3050 vf);
6d87f5c3 3051 else
11ac75ed 3052 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3053
3054 mac[5] += 1;
3055 }
3056 return status;
3057}
3058
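/* Address assignment above, by example (the seed shown is
 * hypothetical): if be_vf_eth_addr_generate() yields
 * 02:00:c9:00:00:10, VF0 is given ...:10, VF1 ...:11, and so on;
 * only mac[5] is incremented, so the scheme assumes num_vfs is small
 * enough not to wrap the last octet.
 */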
4c876616
SP
3059static int be_vfs_mac_query(struct be_adapter *adapter)
3060{
3061 int status, vf;
3062 u8 mac[ETH_ALEN];
3063 struct be_vf_cfg *vf_cfg;
4c876616
SP
3064
3065 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3066 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3067 mac, vf_cfg->if_handle,
3068 false, vf+1);
4c876616
SP
3069 if (status)
3070 return status;
3071 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3072 }
3073 return 0;
3074}
3075
f9449ab7 3076static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3077{
11ac75ed 3078 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3079 u32 vf;
3080
257a3feb 3081 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3082 dev_warn(&adapter->pdev->dev,
3083 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3084 goto done;
3085 }
3086
b4c1df93
SP
3087 pci_disable_sriov(adapter->pdev);
3088
11ac75ed 3089 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3090 if (BEx_chip(adapter))
11ac75ed
SP
3091 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3092 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3093 else
3094 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3095 vf + 1);
f9449ab7 3096
11ac75ed
SP
3097 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3098 }
39f1d94d
SP
3099done:
3100 kfree(adapter->vf_cfg);
3101 adapter->num_vfs = 0;
f174c7ec 3102 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3103}
3104
7707133c
SP
3105static void be_clear_queues(struct be_adapter *adapter)
3106{
3107 be_mcc_queues_destroy(adapter);
3108 be_rx_cqs_destroy(adapter);
3109 be_tx_queues_destroy(adapter);
3110 be_evt_queues_destroy(adapter);
3111}
3112
68d7bdcb 3113static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3114{
191eb756
SP
3115 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3116 cancel_delayed_work_sync(&adapter->work);
3117 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3118 }
68d7bdcb
SP
3119}
3120
b05004ad 3121static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3122{
3123 int i;
3124
b05004ad
SK
3125 if (adapter->pmac_id) {
3126 for (i = 0; i < (adapter->uc_macs + 1); i++)
3127 be_cmd_pmac_del(adapter, adapter->if_handle,
3128 adapter->pmac_id[i], 0);
3129 adapter->uc_macs = 0;
3130
3131 kfree(adapter->pmac_id);
3132 adapter->pmac_id = NULL;
3133 }
3134}
3135
c5abe7c0 3136#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3137static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3138{
630f4b70
SB
3139 struct net_device *netdev = adapter->netdev;
3140
c9c47142
SP
3141 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3142 be_cmd_manage_iface(adapter, adapter->if_handle,
3143 OP_CONVERT_TUNNEL_TO_NORMAL);
3144
3145 if (adapter->vxlan_port)
3146 be_cmd_set_vxlan_port(adapter, 0);
3147
3148 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3149 adapter->vxlan_port = 0;
630f4b70
SB
3150
3151 netdev->hw_enc_features = 0;
3152 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3153 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3154}
c5abe7c0 3155#endif
c9c47142 3156
b05004ad
SK
3157static int be_clear(struct be_adapter *adapter)
3158{
68d7bdcb 3159 be_cancel_worker(adapter);
191eb756 3160
11ac75ed 3161 if (sriov_enabled(adapter))
f9449ab7
SP
3162 be_vf_clear(adapter);
3163
bec84e6b
VV
3164 /* Re-configure FW to distribute resources evenly across max-supported
3165 * number of VFs, only when VFs are not already enabled.
3166 */
3167 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3168 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3169 pci_sriov_get_totalvfs(adapter->pdev));
3170
c5abe7c0 3171#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3172 be_disable_vxlan_offloads(adapter);
c5abe7c0 3173#endif
2d17f403 3174 /* delete the primary mac along with the uc-mac list */
b05004ad 3175 be_mac_clear(adapter);
fbc13f01 3176
f9449ab7 3177 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3178
7707133c 3179 be_clear_queues(adapter);
a54769f5 3180
10ef9ab4 3181 be_msix_disable(adapter);
e1ad8e33 3182 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3183 return 0;
3184}
3185
4c876616 3186static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3187{
92bf14ab 3188 struct be_resources res = {0};
4c876616
SP
3189 struct be_vf_cfg *vf_cfg;
3190 u32 cap_flags, en_flags, vf;
922bbe88 3191 int status = 0;
abb93951 3192
4c876616
SP
3193 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3194 BE_IF_FLAGS_MULTICAST;
abb93951 3195
4c876616 3196 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3197 if (!BE3_chip(adapter)) {
3198 status = be_cmd_get_profile_config(adapter, &res,
3199 vf + 1);
3200 if (!status)
3201 cap_flags = res.if_cap_flags;
3202 }
4c876616
SP
3203
3204 /* If a FW profile exists, then cap_flags are updated */
3205 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3206 BE_IF_FLAGS_BROADCAST |
3207 BE_IF_FLAGS_MULTICAST);
3208 status =
3209 be_cmd_if_create(adapter, cap_flags, en_flags,
3210 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3211 if (status)
3212 goto err;
3213 }
3214err:
3215 return status;
abb93951
PR
3216}
3217
39f1d94d 3218static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3219{
11ac75ed 3220 struct be_vf_cfg *vf_cfg;
30128031
SP
3221 int vf;
3222
39f1d94d
SP
3223 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3224 GFP_KERNEL);
3225 if (!adapter->vf_cfg)
3226 return -ENOMEM;
3227
11ac75ed
SP
3228 for_all_vfs(adapter, vf_cfg, vf) {
3229 vf_cfg->if_handle = -1;
3230 vf_cfg->pmac_id = -1;
30128031 3231 }
39f1d94d 3232 return 0;
30128031
SP
3233}
3234
f9449ab7
SP
3235static int be_vf_setup(struct be_adapter *adapter)
3236{
c502224e 3237 struct device *dev = &adapter->pdev->dev;
11ac75ed 3238 struct be_vf_cfg *vf_cfg;
4c876616 3239 int status, old_vfs, vf;
04a06028 3240 u32 privileges;
39f1d94d 3241
257a3feb 3242 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3243
3244 status = be_vf_setup_init(adapter);
3245 if (status)
3246 goto err;
30128031 3247
4c876616
SP
3248 if (old_vfs) {
3249 for_all_vfs(adapter, vf_cfg, vf) {
3250 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3251 if (status)
3252 goto err;
3253 }
f9449ab7 3254
4c876616
SP
3255 status = be_vfs_mac_query(adapter);
3256 if (status)
3257 goto err;
3258 } else {
bec84e6b
VV
3259 status = be_vfs_if_create(adapter);
3260 if (status)
3261 goto err;
3262
39f1d94d
SP
3263 status = be_vf_eth_addr_config(adapter);
3264 if (status)
3265 goto err;
3266 }
f9449ab7 3267
11ac75ed 3268 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 3269 /* Allow VFs to program MAC/VLAN filters */
3270 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3271 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3272 status = be_cmd_set_fn_privileges(adapter,
3273 privileges |
3274 BE_PRIV_FILTMGMT,
3275 vf + 1);
3276 if (!status)
3277 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3278 vf);
3279 }
3280
3281 /* Allow full available bandwidth */
3282 if (!old_vfs)
3283 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3284
bdce2ad7 3285 if (!old_vfs) {
0599863d 3286 be_cmd_enable_vf(adapter, vf + 1);
3287 be_cmd_set_logical_link_config(adapter,
3288 IFLA_VF_LINK_STATE_AUTO,
3289 vf+1);
3290 }
f9449ab7 3291 }
3292
3293 if (!old_vfs) {
3294 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3295 if (status) {
3296 dev_err(dev, "SRIOV enable failed\n");
3297 adapter->num_vfs = 0;
3298 goto err;
3299 }
3300 }
3301
3302 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3303 return 0;
3304err:
3305 dev_err(dev, "VF setup failed\n");
3306 be_vf_clear(adapter);
3307 return status;
3308}
3309
3310/* Converting function_mode bits on BE3 to SH mc_type enums */
3311
3312static u8 be_convert_mc_type(u32 function_mode)
3313{
66064dbc 3314 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3315 return vNIC1;
66064dbc 3316 else if (function_mode & QNQ_MODE)
3317 return FLEX10;
3318 else if (function_mode & VNIC_MODE)
3319 return vNIC2;
3320 else if (function_mode & UMC_ENABLED)
3321 return UMC;
3322 else
3323 return MC_NONE;
3324}
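
/* An illustrative, standalone sketch (not driver code) of the precedence
 * above: VNIC_MODE together with QNQ_MODE must be tested before QNQ_MODE
 * alone, or QnQ vNICs would be misreported as FLEX10. The flag values
 * below are placeholders, not the real definitions from the driver headers.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define VNIC_MODE_X   (1u << 0)	/* placeholder */
#define QNQ_MODE_X    (1u << 1)	/* placeholder */
#define UMC_ENABLED_X (1u << 2)	/* placeholder */

static const char *mc_type_str(uint32_t mode)
{
	if ((mode & VNIC_MODE_X) && (mode & QNQ_MODE_X))
		return "vNIC1";
	if (mode & QNQ_MODE_X)
		return "FLEX10";
	if (mode & VNIC_MODE_X)
		return "vNIC2";
	if (mode & UMC_ENABLED_X)
		return "UMC";
	return "MC_NONE";
}

int main(void)
{
	printf("%s\n", mc_type_str(QNQ_MODE_X));		/* FLEX10 */
	printf("%s\n", mc_type_str(QNQ_MODE_X | VNIC_MODE_X));	/* vNIC1 */
	return 0;
}
#endif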
3325
3326/* On BE2/BE3 FW does not suggest the supported limits */
3327static void BEx_get_resources(struct be_adapter *adapter,
3328 struct be_resources *res)
3329{
bec84e6b 3330 bool use_sriov = adapter->num_vfs ? 1 : 0;
3331
3332 if (be_physfn(adapter))
3333 res->max_uc_mac = BE_UC_PMAC_COUNT;
3334 else
3335 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3336
3337 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3338
3339 if (be_is_mc(adapter)) {
3340 /* Assume there are 4 channels per port when
3341 * multi-channel is enabled
3342 */
3343 if (be_is_qnq_mode(adapter))
3344 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3345 else
3346 /* In a non-qnq multichannel mode, the pvid
3347 * takes up one vlan entry
3348 */
3349 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3350 } else {
92bf14ab 3351 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3352 }
3353
3354 res->max_mcast_mac = BE_MAX_MC;
3355
3356 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3357 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3358 * *only* if it is RSS-capable.
3359 */
3360 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3361 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3362 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3363 res->max_tx_qs = 1;
3364 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3365 struct be_resources super_nic_res = {0};
3366
3367 /* On a SuperNIC profile, the driver needs to use the
3368 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3369 */
3370 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3371 /* Some old versions of BE3 FW don't report max_tx_qs value */
3372 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3373 } else {
92bf14ab 3374 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3375 }
3376
3377 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3378 !use_sriov && be_physfn(adapter))
3379 res->max_rss_qs = (adapter->be3_native) ?
3380 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3381 res->max_rx_qs = res->max_rss_qs + 1;
3382
e3dc867c 3383 if (be_physfn(adapter))
d3518e21 3384 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
3385 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3386 else
3387 res->max_evt_qs = 1;
3388
3389 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3390 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3391 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3392}
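
/* An illustrative, standalone sketch (not driver code) of the VLAN budget
 * above, extracted as a pure function. NUM_VLANS is a placeholder for
 * BE_NUM_VLANS_SUPPORTED, whose real value lives in the driver headers.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define NUM_VLANS 64	/* placeholder for BE_NUM_VLANS_SUPPORTED */

static uint16_t bex_max_vlans(bool multi_channel, bool qnq)
{
	if (!multi_channel)
		return NUM_VLANS;
	if (qnq)
		return NUM_VLANS / 8;
	return (NUM_VLANS / 4) - 1;	/* non-QnQ: the PVID costs one entry */
}
#endif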
3393
3394static void be_setup_init(struct be_adapter *adapter)
3395{
3396 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3397 adapter->phy.link_speed = -1;
3398 adapter->if_handle = -1;
3399 adapter->be3_native = false;
3400 adapter->promiscuous = false;
3401 if (be_physfn(adapter))
3402 adapter->cmd_privileges = MAX_PRIVILEGES;
3403 else
3404 adapter->cmd_privileges = MIN_PRIVILEGES;
3405}
3406
3407static int be_get_sriov_config(struct be_adapter *adapter)
3408{
3409 struct device *dev = &adapter->pdev->dev;
3410 struct be_resources res = {0};
d3d18312 3411 int max_vfs, old_vfs;
3412
3413 /* Some old versions of BE3 FW don't report max_vfs value */
3414 be_cmd_get_profile_config(adapter, &res, 0);
3415
3416 if (BE3_chip(adapter) && !res.max_vfs) {
3417 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3418 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3419 }
3420
d3d18312 3421 adapter->pool_res = res;
3422
3423 if (!be_max_vfs(adapter)) {
3424 if (num_vfs)
50762667 3425 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
3426 adapter->num_vfs = 0;
3427 return 0;
3428 }
3429
3430 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3431
3432 /* validate num_vfs module param */
3433 old_vfs = pci_num_vf(adapter->pdev);
3434 if (old_vfs) {
3435 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3436 if (old_vfs != num_vfs)
3437 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3438 adapter->num_vfs = old_vfs;
3439 } else {
3440 if (num_vfs > be_max_vfs(adapter)) {
3441 dev_info(dev, "Resources unavailable to init %d VFs\n",
3442 num_vfs);
3443 dev_info(dev, "Limiting to %d VFs\n",
3444 be_max_vfs(adapter));
3445 }
3446 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3447 }
3448
3449 return 0;
3450}
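
/* An illustrative sketch (not driver code): the num_vfs validation above
 * reduces to "keep the already-enabled VF count if there is one, since it
 * cannot change while SR-IOV stays enabled; otherwise clamp the requested
 * count to the pool maximum".
 */
#if 0
#include <stdint.h>

static uint16_t resolve_num_vfs(uint16_t requested, uint16_t enabled,
				uint16_t pool_max)
{
	if (enabled)
		return enabled;
	return requested < pool_max ? requested : pool_max;
}
#endif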
3451
92bf14ab 3452static int be_get_resources(struct be_adapter *adapter)
abb93951 3453{
3454 struct device *dev = &adapter->pdev->dev;
3455 struct be_resources res = {0};
3456 int status;
abb93951 3457
3458 if (BEx_chip(adapter)) {
3459 BEx_get_resources(adapter, &res);
3460 adapter->res = res;
3461 }
3462
3463 /* For Lancer, SH etc., read per-function resource limits from FW.
3464 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3465 * GET_PROFILE_CONFIG returns the PF-pool's PCI-E related limits.
3466 */
3467 if (!BEx_chip(adapter)) {
3468 status = be_cmd_get_func_config(adapter, &res);
3469 if (status)
3470 return status;
abb93951 3471
3472 /* If RoCE is supported, stash away half the EQs for RoCE */
3473 if (be_roce_supported(adapter))
3474 res.max_evt_qs /= 2;
3475 adapter->res = res;
abb93951 3476 }
4c876616 3477
3478 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3479 be_max_txqs(adapter), be_max_rxqs(adapter),
3480 be_max_rss(adapter), be_max_eqs(adapter),
3481 be_max_vfs(adapter));
3482 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3483 be_max_uc(adapter), be_max_mc(adapter),
3484 be_max_vlans(adapter));
3485
92bf14ab 3486 return 0;
3487}
3488
3489static void be_sriov_config(struct be_adapter *adapter)
3490{
3491 struct device *dev = &adapter->pdev->dev;
3492 int status;
3493
3494 status = be_get_sriov_config(adapter);
3495 if (status) {
3496 dev_err(dev, "Failed to query SR-IOV configuration\n");
3497 dev_err(dev, "SR-IOV cannot be enabled\n");
3498 return;
3499 }
3500
3501 /* When the HW is in an SR-IOV capable configuration, the PF-pool
3502 * resources are equally distributed across the maximum number of
3503 * VFs. The user may request only a subset of the max VFs to be
3504 * enabled. Based on num_vfs, redistribute the resources across
3505 * num_vfs so that each VF gets a larger share of resources.
3506 * This facility is not available in BE3 FW; on Lancer, the FW
3507 * does this redistribution itself.
3508 */
3509 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3510 status = be_cmd_set_sriov_config(adapter,
3511 adapter->pool_res,
3512 adapter->num_vfs);
3513 if (status)
3514 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3515 }
3516}
3517
3518static int be_get_config(struct be_adapter *adapter)
3519{
542963b7 3520 u16 profile_id;
4c876616 3521 int status;
39f1d94d 3522
e97e3cda 3523 status = be_cmd_query_fw_cfg(adapter);
abb93951 3524 if (status)
92bf14ab 3525 return status;
abb93951 3526
3527 if (be_physfn(adapter)) {
3528 status = be_cmd_get_active_profile(adapter, &profile_id);
3529 if (!status)
3530 dev_info(&adapter->pdev->dev,
3531 "Using profile 0x%x\n", profile_id);
962bcb75 3532 }
bec84e6b 3533
3534 if (!BE2_chip(adapter) && be_physfn(adapter))
3535 be_sriov_config(adapter);
542963b7 3536
3537 status = be_get_resources(adapter);
3538 if (status)
3539 return status;
abb93951 3540
3541 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3542 sizeof(*adapter->pmac_id), GFP_KERNEL);
3543 if (!adapter->pmac_id)
3544 return -ENOMEM;
abb93951 3545
3546 /* Sanitize cfg_num_qs based on HW and platform limits */
3547 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3548
3549 return 0;
3550}
3551
3552static int be_mac_setup(struct be_adapter *adapter)
3553{
3554 u8 mac[ETH_ALEN];
3555 int status;
3556
3557 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3558 status = be_cmd_get_perm_mac(adapter, mac);
3559 if (status)
3560 return status;
3561
3562 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3563 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3564 } else {
3565 /* Maybe the HW was reset; dev_addr must be re-programmed */
3566 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3567 }
3568
3569 /* For BE3-R VFs, the PF programs the initial MAC address */
3570 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3571 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3572 &adapter->pmac_id[0], 0);
3573 return 0;
3574}
3575
3576static void be_schedule_worker(struct be_adapter *adapter)
3577{
3578 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3579 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3580}
3581
7707133c 3582static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3583{
68d7bdcb 3584 struct net_device *netdev = adapter->netdev;
10ef9ab4 3585 int status;
ba343c77 3586
7707133c 3587 status = be_evt_queues_create(adapter);
3588 if (status)
3589 goto err;
73d540f2 3590
7707133c 3591 status = be_tx_qs_create(adapter);
3592 if (status)
3593 goto err;
10ef9ab4 3594
7707133c 3595 status = be_rx_cqs_create(adapter);
10ef9ab4 3596 if (status)
a54769f5 3597 goto err;
6b7c5b94 3598
7707133c 3599 status = be_mcc_queues_create(adapter);
3600 if (status)
3601 goto err;
3602
3603 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3604 if (status)
3605 goto err;
3606
3607 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3608 if (status)
3609 goto err;
3610
3611 return 0;
3612err:
3613 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3614 return status;
3615}
3616
3617int be_update_queues(struct be_adapter *adapter)
3618{
3619 struct net_device *netdev = adapter->netdev;
3620 int status;
3621
3622 if (netif_running(netdev))
3623 be_close(netdev);
3624
3625 be_cancel_worker(adapter);
3626
3627 /* If any vectors have been shared with RoCE we cannot re-program
3628 * the MSIx table.
3629 */
3630 if (!adapter->num_msix_roce_vec)
3631 be_msix_disable(adapter);
3632
3633 be_clear_queues(adapter);
3634
3635 if (!msix_enabled(adapter)) {
3636 status = be_msix_enable(adapter);
3637 if (status)
3638 return status;
3639 }
3640
3641 status = be_setup_queues(adapter);
3642 if (status)
3643 return status;
3644
3645 be_schedule_worker(adapter);
3646
3647 if (netif_running(netdev))
3648 status = be_open(netdev);
3649
3650 return status;
3651}
3652
3653static int be_setup(struct be_adapter *adapter)
3654{
3655 struct device *dev = &adapter->pdev->dev;
3656 u32 tx_fc, rx_fc, en_flags;
3657 int status;
3658
3659 be_setup_init(adapter);
3660
3661 if (!lancer_chip(adapter))
3662 be_cmd_req_native_mode(adapter);
3663
3664 status = be_get_config(adapter);
10ef9ab4 3665 if (status)
a54769f5 3666 goto err;
6b7c5b94 3667
7707133c 3668 status = be_msix_enable(adapter);
10ef9ab4 3669 if (status)
a54769f5 3670 goto err;
6b7c5b94 3671
f9449ab7 3672 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3673 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3674 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3675 en_flags |= BE_IF_FLAGS_RSS;
3676 en_flags = en_flags & be_if_cap_flags(adapter);
3677 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3678 &adapter->if_handle, 0);
7707133c 3679 if (status)
a54769f5 3680 goto err;
6b7c5b94 3681
3682 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3683 rtnl_lock();
7707133c 3684 status = be_setup_queues(adapter);
68d7bdcb 3685 rtnl_unlock();
95046b92 3686 if (status)
3687 goto err;
3688
7707133c 3689 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3690
3691 status = be_mac_setup(adapter);
3692 if (status)
3693 goto err;
3694
e97e3cda 3695 be_cmd_get_fw_ver(adapter);
acbafeb1 3696 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 3697
e9e2a904 3698 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 3699 dev_err(dev, "Firmware on card is old (%s); IRQs may not work",
3700 adapter->fw_ver);
3701 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3702 }
3703
1d1e9a46 3704 if (adapter->vlans_added)
10329df8 3705 be_vid_config(adapter);
7ab8b0b4 3706
a54769f5 3707 be_set_rx_mode(adapter->netdev);
5fb379ee 3708
3709 be_cmd_get_acpi_wol_cap(adapter);
3710
ddc3f5cb 3711 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3712
3713 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3714 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3715 adapter->rx_fc);
2dc1deb6 3716
3717 if (be_physfn(adapter))
3718 be_cmd_set_logical_link_config(adapter,
3719 IFLA_VF_LINK_STATE_AUTO, 0);
3720
3721 if (adapter->num_vfs)
3722 be_vf_setup(adapter);
f9449ab7 3723
3724 status = be_cmd_get_phy_info(adapter);
3725 if (!status && be_pause_supported(adapter))
3726 adapter->phy.fc_autoneg = 1;
3727
68d7bdcb 3728 be_schedule_worker(adapter);
e1ad8e33 3729 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3730 return 0;
3731err:
3732 be_clear(adapter);
3733 return status;
3734}
6b7c5b94 3735
3736#ifdef CONFIG_NET_POLL_CONTROLLER
3737static void be_netpoll(struct net_device *netdev)
3738{
3739 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3740 struct be_eq_obj *eqo;
3741 int i;
3742
3743 for_all_evt_queues(adapter, eqo, i) {
3744 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3745 napi_schedule(&eqo->napi);
3746 }
3747}
3748#endif
3749
96c9b2e4 3750static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3751
3752static bool phy_flashing_required(struct be_adapter *adapter)
3753{
3754 return (adapter->phy.phy_type == TN_8022 &&
3755 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3756}
3757
3758static bool is_comp_in_ufi(struct be_adapter *adapter,
3759 struct flash_section_info *fsec, int type)
3760{
3761 int i = 0, img_type = 0;
3762 struct flash_section_info_g2 *fsec_g2 = NULL;
3763
ca34fe38 3764 if (BE2_chip(adapter))
3765 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3766
3767 for (i = 0; i < MAX_FLASH_COMP; i++) {
3768 if (fsec_g2)
3769 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3770 else
3771 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3772
3773 if (img_type == type)
3774 return true;
3775 }
3776 return false;
3777
3778}
3779
4188e7df 3780static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3781 int header_size,
3782 const struct firmware *fw)
3783{
3784 struct flash_section_info *fsec = NULL;
3785 const u8 *p = fw->data;
3786
3787 p += header_size;
3788 while (p < (fw->data + fw->size)) {
3789 fsec = (struct flash_section_info *)p;
3790 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3791 return fsec;
3792 p += 32;
3793 }
3794 return NULL;
3795}
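
/* An illustrative, standalone sketch (not driver code) of the cookie scan
 * above: walk the image at a fixed 32-byte stride looking for the section
 * cookie. Unlike the driver loop, this sketch bounds the memcmp() to the
 * remaining buffer explicitly.
 */
#if 0
#include <stddef.h>
#include <string.h>

static const unsigned char *find_cookie(const unsigned char *buf, size_t len,
					const void *cookie, size_t cookie_len)
{
	const unsigned char *p = buf;

	while (p + cookie_len <= buf + len) {
		if (!memcmp(p, cookie, cookie_len))
			return p;
		p += 32;	/* section headers sit on 32-byte boundaries */
	}
	return NULL;
}
#endif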
3796
3797static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3798 u32 img_offset, u32 img_size, int hdr_size,
3799 u16 img_optype, bool *crc_match)
3800{
3801 u32 crc_offset;
3802 int status;
3803 u8 crc[4];
3804
3805 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3806 if (status)
3807 return status;
3808
3809 crc_offset = hdr_size + img_offset + img_size - 4;
3810
3811 /* Skip flashing if the CRC of the already-flashed region matches */
3812 if (!memcmp(crc, p + crc_offset, 4))
3813 *crc_match = true;
3814 else
3815 *crc_match = false;
3816
3817 return status;
3818}
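
/* An illustrative sketch (not driver code): each flash region stores its
 * CRC in its own last 4 bytes, which is where the
 * "hdr_size + img_offset + img_size - 4" offset above points.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool image_crc_matches(const unsigned char *fw_data, size_t hdr_size,
			      size_t img_offset, size_t img_size,
			      const unsigned char flashed_crc[4])
{
	const unsigned char *file_crc =
		fw_data + hdr_size + img_offset + img_size - 4;

	return !memcmp(flashed_crc, file_crc, 4);
}
#endif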
3819
773a2d7c 3820static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3821 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3822{
773a2d7c 3823 struct be_cmd_write_flashrom *req = flash_cmd->va;
3824 u32 total_bytes, flash_op, num_bytes;
3825 int status;
3826
3827 total_bytes = img_size;
3828 while (total_bytes) {
3829 num_bytes = min_t(u32, 32*1024, total_bytes);
3830
3831 total_bytes -= num_bytes;
3832
3833 if (!total_bytes) {
3834 if (optype == OPTYPE_PHY_FW)
3835 flash_op = FLASHROM_OPER_PHY_FLASH;
3836 else
3837 flash_op = FLASHROM_OPER_FLASH;
3838 } else {
3839 if (optype == OPTYPE_PHY_FW)
3840 flash_op = FLASHROM_OPER_PHY_SAVE;
3841 else
3842 flash_op = FLASHROM_OPER_SAVE;
3843 }
3844
be716446 3845 memcpy(req->data_buf, img, num_bytes);
3846 img += num_bytes;
3847 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3848 flash_op, num_bytes);
4c60005f 3849 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3850 optype == OPTYPE_PHY_FW)
3851 break;
3852 else if (status)
773a2d7c 3853 return status;
3854 }
3855 return 0;
3856}
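
/* An illustrative sketch (not driver code) of the chunking policy above:
 * every chunk except the last is written with a "save" opcode and only
 * the final chunk commits ("flash"). write_chunk() is a hypothetical
 * stand-in for be_cmd_write_flashrom().
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

#define CHUNK_SZ (32 * 1024)

static int flash_in_chunks(const unsigned char *img, size_t total,
			   int (*write_chunk)(const unsigned char *p,
					      size_t n, bool commit))
{
	while (total) {
		size_t n = total < CHUNK_SZ ? total : CHUNK_SZ;
		int rc;

		total -= n;
		rc = write_chunk(img, n, total == 0);	/* commit on last */
		if (rc)
			return rc;
		img += n;
	}
	return 0;
}
#endif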
3857
0ad3157e 3858/* For BE2, BE3 and BE3-R */
ca34fe38 3859static int be_flash_BEx(struct be_adapter *adapter,
3860 const struct firmware *fw,
3861 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 3862{
c165541e 3863 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 3864 struct device *dev = &adapter->pdev->dev;
c165541e 3865 struct flash_section_info *fsec = NULL;
3866 int status, i, filehdr_size, num_comp;
3867 const struct flash_comp *pflashcomp;
3868 bool crc_match;
3869 const u8 *p;
3870
3871 struct flash_comp gen3_flash_types[] = {
3872 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3873 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3874 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3875 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3876 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3877 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3878 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3879 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3880 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3881 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3882 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3883 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3884 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3885 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3886 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3887 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3888 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3889 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3890 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3891 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3892 };
3893
3894 struct flash_comp gen2_flash_types[] = {
3895 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3896 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3897 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3898 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3899 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3900 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3901 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3902 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3903 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3904 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3905 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3906 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3907 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3908 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3909 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3910 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3911 };
3912
ca34fe38 3913 if (BE3_chip(adapter)) {
3914 pflashcomp = gen3_flash_types;
3915 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3916 num_comp = ARRAY_SIZE(gen3_flash_types);
3917 } else {
3918 pflashcomp = gen2_flash_types;
3919 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3920 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3921 }
ca34fe38 3922
3923 /* Get flash section info */
3924 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3925 if (!fsec) {
96c9b2e4 3926 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3927 return -1;
3928 }
9fe96934 3929 for (i = 0; i < num_comp; i++) {
c165541e 3930 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3931 continue;
3932
3933 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3934 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3935 continue;
3936
3937 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3938 !phy_flashing_required(adapter))
306f1348 3939 continue;
c165541e 3940
773a2d7c 3941 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3942 status = be_check_flash_crc(adapter, fw->data,
3943 pflashcomp[i].offset,
3944 pflashcomp[i].size,
3945 filehdr_size +
3946 img_hdrs_size,
3947 OPTYPE_REDBOOT, &crc_match);
3948 if (status) {
3949 dev_err(dev,
3950 "Could not get CRC for 0x%x region\n",
3951 pflashcomp[i].optype);
3952 continue;
3953 }
3954
3955 if (crc_match)
3956 continue;
3957 }
c165541e 3958
3959 p = fw->data + filehdr_size + pflashcomp[i].offset +
3960 img_hdrs_size;
3961 if (p + pflashcomp[i].size > fw->data + fw->size)
3962 return -1;
3963
3964 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
748b539a 3965 pflashcomp[i].size);
773a2d7c 3966 if (status) {
96c9b2e4 3967 dev_err(dev, "Flashing section type 0x%x failed\n",
3968 pflashcomp[i].img_type);
3969 return status;
84517482 3970 }
84517482 3971 }
3972 return 0;
3973}
3974
3975static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3976{
3977 u32 img_type = le32_to_cpu(fsec_entry.type);
3978 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3979
3980 if (img_optype != 0xFFFF)
3981 return img_optype;
3982
3983 switch (img_type) {
3984 case IMAGE_FIRMWARE_iSCSI:
3985 img_optype = OPTYPE_ISCSI_ACTIVE;
3986 break;
3987 case IMAGE_BOOT_CODE:
3988 img_optype = OPTYPE_REDBOOT;
3989 break;
3990 case IMAGE_OPTION_ROM_ISCSI:
3991 img_optype = OPTYPE_BIOS;
3992 break;
3993 case IMAGE_OPTION_ROM_PXE:
3994 img_optype = OPTYPE_PXE_BIOS;
3995 break;
3996 case IMAGE_OPTION_ROM_FCoE:
3997 img_optype = OPTYPE_FCOE_BIOS;
3998 break;
3999 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4000 img_optype = OPTYPE_ISCSI_BACKUP;
4001 break;
4002 case IMAGE_NCSI:
4003 img_optype = OPTYPE_NCSI_FW;
4004 break;
4005 case IMAGE_FLASHISM_JUMPVECTOR:
4006 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4007 break;
4008 case IMAGE_FIRMWARE_PHY:
4009 img_optype = OPTYPE_SH_PHY_FW;
4010 break;
4011 case IMAGE_REDBOOT_DIR:
4012 img_optype = OPTYPE_REDBOOT_DIR;
4013 break;
4014 case IMAGE_REDBOOT_CONFIG:
4015 img_optype = OPTYPE_REDBOOT_CONFIG;
4016 break;
4017 case IMAGE_UFI_DIR:
4018 img_optype = OPTYPE_UFI_DIR;
4019 break;
4020 default:
4021 break;
4022 }
4023
4024 return img_optype;
4025}
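
/* An illustrative sketch (not part of the original source): the switch
 * above is a pure img_type -> optype map for legacy (optype == 0xFFFF)
 * entries; a table-driven equivalent makes that explicit. It assumes the
 * driver's IMAGE_* / OPTYPE_* definitions are in scope.
 */
#if 0
static u16 be_get_img_optype_tbl(u32 img_type)
{
	static const struct { u32 img; u16 op; } map[] = {
		{ IMAGE_FIRMWARE_iSCSI, OPTYPE_ISCSI_ACTIVE },
		{ IMAGE_BOOT_CODE, OPTYPE_REDBOOT },
		{ IMAGE_OPTION_ROM_ISCSI, OPTYPE_BIOS },
		{ IMAGE_OPTION_ROM_PXE, OPTYPE_PXE_BIOS },
		{ IMAGE_OPTION_ROM_FCoE, OPTYPE_FCOE_BIOS },
		{ IMAGE_FIRMWARE_BACKUP_iSCSI, OPTYPE_ISCSI_BACKUP },
		{ IMAGE_NCSI, OPTYPE_NCSI_FW },
		{ IMAGE_FLASHISM_JUMPVECTOR, OPTYPE_FLASHISM_JUMPVECTOR },
		{ IMAGE_FIRMWARE_PHY, OPTYPE_SH_PHY_FW },
		{ IMAGE_REDBOOT_DIR, OPTYPE_REDBOOT_DIR },
		{ IMAGE_REDBOOT_CONFIG, OPTYPE_REDBOOT_CONFIG },
		{ IMAGE_UFI_DIR, OPTYPE_UFI_DIR },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].img == img_type)
			return map[i].op;
	return 0xFFFF;	/* unknown: caller skips the entry */
}
#endif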
4026
773a2d7c 4027static int be_flash_skyhawk(struct be_adapter *adapter,
4028 const struct firmware *fw,
4029 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4030{
773a2d7c 4031 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
96c9b2e4 4032 struct device *dev = &adapter->pdev->dev;
773a2d7c 4033 struct flash_section_info *fsec = NULL;
4034 u32 img_offset, img_size, img_type;
4035 int status, i, filehdr_size;
4036 bool crc_match, old_fw_img;
4037 u16 img_optype;
4038 const u8 *p;
4039
4040 filehdr_size = sizeof(struct flash_file_hdr_g3);
4041 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4042 if (!fsec) {
96c9b2e4 4043 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4044 return -EINVAL;
4045 }
4046
4047 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4048 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4049 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4050 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4051 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4052 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4053
96c9b2e4 4054 if (img_optype == 0xFFFF)
773a2d7c 4055 continue;
4056 /* Don't bother verifying CRC if an old FW image is being
4057 * flashed
4058 */
4059 if (old_fw_img)
4060 goto flash;
4061
4062 status = be_check_flash_crc(adapter, fw->data, img_offset,
4063 img_size, filehdr_size +
4064 img_hdrs_size, img_optype,
4065 &crc_match);
4066 /* The current FW image on the card does not recognize the new
4067 * FLASH op_type. The FW download is partially complete.
4068 * Reboot the server now to enable FW image to recognize the
4069 * new FLASH op_type. To complete the remaining process,
4070 * download the same FW again after the reboot.
4071 */
4072 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4073 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4074 dev_err(dev, "Flash incomplete. Reset the server\n");
4075 dev_err(dev, "Download FW image again after reset\n");
4076 return -EAGAIN;
4077 } else if (status) {
4078 dev_err(dev, "Could not get CRC for 0x%x region\n",
4079 img_optype);
4080 return -EFAULT;
4081 }
4082
4083 if (crc_match)
4084 continue;
773a2d7c 4085
4086flash:
4087 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4088 if (p + img_size > fw->data + fw->size)
4089 return -1;
4090
4091 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
4092 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4093 * UFI_DIR region
4094 */
4095 if (old_fw_img &&
4096 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4097 (img_optype == OPTYPE_UFI_DIR &&
4098 base_status(status) == MCC_STATUS_FAILED))) {
4099 continue;
4100 } else if (status) {
4101 dev_err(dev, "Flashing section type 0x%x failed\n",
4102 img_type);
4103 return -EFAULT;
4104 }
4105 }
4106 return 0;
4107}
4108
485bf569 4109static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4110 const struct firmware *fw)
84517482 4111{
4112#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4113#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4114 struct device *dev = &adapter->pdev->dev;
84517482 4115 struct be_dma_mem flash_cmd;
4116 const u8 *data_ptr = NULL;
4117 u8 *dest_image_ptr = NULL;
4118 size_t image_size = 0;
4119 u32 chunk_size = 0;
4120 u32 data_written = 0;
4121 u32 offset = 0;
4122 int status = 0;
4123 u8 add_status = 0;
f67ef7ba 4124 u8 change_status;
84517482 4125
485bf569 4126 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4127 dev_err(dev, "FW image size should be a multiple of 4\n");
3fb8cb80 4128 return -EINVAL;
4129 }
4130
4131 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4132 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4133 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4134 &flash_cmd.dma, GFP_KERNEL);
4135 if (!flash_cmd.va)
4136 return -ENOMEM;
84517482 4137
4138 dest_image_ptr = flash_cmd.va +
4139 sizeof(struct lancer_cmd_req_write_object);
4140 image_size = fw->size;
4141 data_ptr = fw->data;
4142
4143 while (image_size) {
4144 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4145
4146 /* Copy the image chunk content. */
4147 memcpy(dest_image_ptr, data_ptr, chunk_size);
4148
4149 status = lancer_cmd_write_object(adapter, &flash_cmd,
4150 chunk_size, offset,
4151 LANCER_FW_DOWNLOAD_LOCATION,
4152 &data_written, &change_status,
4153 &add_status);
4154 if (status)
4155 break;
4156
4157 offset += data_written;
4158 data_ptr += data_written;
4159 image_size -= data_written;
4160 }
4161
4162 if (!status) {
4163 /* Commit the FW written */
4164 status = lancer_cmd_write_object(adapter, &flash_cmd,
4165 0, offset,
4166 LANCER_FW_DOWNLOAD_LOCATION,
4167 &data_written, &change_status,
4168 &add_status);
4169 }
4170
bb864e07 4171 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4172 if (status) {
bb864e07 4173 dev_err(dev, "Firmware load error\n");
3fb8cb80 4174 return be_cmd_status(status);
4175 }
4176
4177 dev_info(dev, "Firmware flashed successfully\n");
4178
f67ef7ba 4179 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4180 dev_info(dev, "Resetting adapter to activate new FW\n");
4181 status = lancer_physdev_ctrl(adapter,
4182 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4183 if (status) {
4184 dev_err(dev, "Adapter busy, could not reset FW\n");
4185 dev_err(dev, "Reboot server to activate new FW\n");
4186 }
4187 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4188 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4189 }
4190
4191 return 0;
4192}
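
/* An illustrative, standalone sketch (not driver code): unlike be_flash(),
 * the Lancer loop above advances by data_written -- the byte count the
 * firmware reports back -- rather than by the requested chunk size, and a
 * zero-length write commits the image. write_object() is a hypothetical
 * stand-in for lancer_cmd_write_object().
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define DL_CHUNK (32 * 1024)

extern int write_object(const void *buf, uint32_t len, uint32_t offset,
			uint32_t *written);

static int lancer_download(const unsigned char *img, size_t size)
{
	uint32_t offset = 0, written = 0;
	int rc;

	while (size) {
		uint32_t chunk = size < DL_CHUNK ? (uint32_t)size : DL_CHUNK;

		rc = write_object(img, chunk, offset, &written);
		if (rc)
			return rc;
		offset += written;	/* advance by what was accepted */
		img += written;
		size -= written;
	}
	return write_object(NULL, 0, offset, &written);	/* commit */
}
#endif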
4193
4194#define UFI_TYPE2 2
4195#define UFI_TYPE3 3
0ad3157e 4196#define UFI_TYPE3R 10
4197#define UFI_TYPE4 4
4198static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4199 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4200{
ddf1169f 4201 if (!fhdr)
4202 goto be_get_ufi_exit;
4203
4204 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4205 return UFI_TYPE4;
4206 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4207 if (fhdr->asic_type_rev == 0x10)
4208 return UFI_TYPE3R;
4209 else
4210 return UFI_TYPE3;
4211 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4212 return UFI_TYPE2;
4213
4214be_get_ufi_exit:
4215 dev_err(&adapter->pdev->dev,
4216 "UFI and Interface are not compatible for flashing\n");
4217 return -1;
4218}
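
/* For reference, the compatibility matrix encoded above:
 *
 *   chip     build[0]  asic_type_rev  ->  UFI type
 *   Skyhawk  '4'       -              ->  UFI_TYPE4
 *   BE3      '3'       0x10           ->  UFI_TYPE3R
 *   BE3      '3'       other          ->  UFI_TYPE3
 *   BE2      '2'       -              ->  UFI_TYPE2
 *   any other combination             ->  incompatible (-1)
 */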
4219
4220static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4221{
4222 struct flash_file_hdr_g3 *fhdr3;
4223 struct image_hdr *img_hdr_ptr = NULL;
4224 struct be_dma_mem flash_cmd;
4225 const u8 *p;
773a2d7c 4226 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4227
be716446 4228 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4229 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4230 &flash_cmd.dma, GFP_KERNEL);
4231 if (!flash_cmd.va) {
4232 status = -ENOMEM;
485bf569 4233 goto be_fw_exit;
4234 }
4235
773a2d7c 4236 p = fw->data;
0ad3157e 4237 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4238
0ad3157e 4239 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4240
4241 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4242 for (i = 0; i < num_imgs; i++) {
4243 img_hdr_ptr = (struct image_hdr *)(fw->data +
4244 (sizeof(struct flash_file_hdr_g3) +
4245 i * sizeof(struct image_hdr)));
4246 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
4247 switch (ufi_type) {
4248 case UFI_TYPE4:
773a2d7c 4249 status = be_flash_skyhawk(adapter, fw,
748b539a 4250 &flash_cmd, num_imgs);
4251 break;
4252 case UFI_TYPE3R:
4253 status = be_flash_BEx(adapter, fw, &flash_cmd,
4254 num_imgs);
4255 break;
4256 case UFI_TYPE3:
4257 /* Do not flash this ufi on BE3-R cards */
4258 if (adapter->asic_rev < 0x10)
4259 status = be_flash_BEx(adapter, fw,
4260 &flash_cmd,
4261 num_imgs);
4262 else {
56ace3a0 4263 status = -EINVAL;
4264 dev_err(&adapter->pdev->dev,
4265 "Can't load BE3 UFI on BE3R\n");
4266 }
4267 }
3f0d4560 4268 }
4269 }
4270
4271 if (ufi_type == UFI_TYPE2)
4272 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4273 else if (ufi_type == -1)
56ace3a0 4274 status = -EINVAL;
84517482 4275
4276 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4277 flash_cmd.dma);
4278 if (status) {
4279 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4280 goto be_fw_exit;
4281 }
4282
af901ca1 4283 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4284
4285be_fw_exit:
4286 return status;
4287}
4288
4289int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4290{
4291 const struct firmware *fw;
4292 int status;
4293
4294 if (!netif_running(adapter->netdev)) {
4295 dev_err(&adapter->pdev->dev,
4296 "Firmware load not allowed (interface is down)\n");
940a3fcd 4297 return -ENETDOWN;
4298 }
4299
4300 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4301 if (status)
4302 goto fw_exit;
4303
4304 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4305
4306 if (lancer_chip(adapter))
4307 status = lancer_fw_download(adapter, fw);
4308 else
4309 status = be_fw_download(adapter, fw);
4310
eeb65ced 4311 if (!status)
e97e3cda 4312 be_cmd_get_fw_ver(adapter);
eeb65ced 4313
4314fw_exit:
4315 release_firmware(fw);
4316 return status;
4317}
4318
748b539a 4319static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4320{
4321 struct be_adapter *adapter = netdev_priv(dev);
4322 struct nlattr *attr, *br_spec;
4323 int rem;
4324 int status = 0;
4325 u16 mode = 0;
4326
4327 if (!sriov_enabled(adapter))
4328 return -EOPNOTSUPP;
4329
4330 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4331 if (!br_spec)
4332 return -EINVAL;
4333
4334 nla_for_each_nested(attr, br_spec, rem) {
4335 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4336 continue;
4337
4338 if (nla_len(attr) < sizeof(mode))
4339 return -EINVAL;
4340
4341 mode = nla_get_u16(attr);
4342 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4343 return -EINVAL;
4344
4345 status = be_cmd_set_hsw_config(adapter, 0, 0,
4346 adapter->if_handle,
4347 mode == BRIDGE_MODE_VEPA ?
4348 PORT_FWD_TYPE_VEPA :
4349 PORT_FWD_TYPE_VEB);
4350 if (status)
4351 goto err;
4352
4353 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4354 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4355
4356 return status;
4357 }
4358err:
4359 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4360 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4361
4362 return status;
4363}
4364
4365static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4366 struct net_device *dev, u32 filter_mask)
4367{
4368 struct be_adapter *adapter = netdev_priv(dev);
4369 int status = 0;
4370 u8 hsw_mode;
4371
4372 if (!sriov_enabled(adapter))
4373 return 0;
4374
4375 /* BE and Lancer chips support VEB mode only */
4376 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4377 hsw_mode = PORT_FWD_TYPE_VEB;
4378 } else {
4379 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4380 adapter->if_handle, &hsw_mode);
4381 if (status)
4382 return 0;
4383 }
4384
4385 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4386 hsw_mode == PORT_FWD_TYPE_VEPA ?
4387 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4388 0, 0);
4389}
4390
c5abe7c0 4391#ifdef CONFIG_BE2NET_VXLAN
4392/* VxLAN offload Notes:
4393 *
4394 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4395 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4396 * is expected to work across all types of IP tunnels once exported. Skyhawk
4397 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4398 * offloads in hw_enc_features only when a VxLAN port is added. Note this only
4399 * ensures that other tunnels work fine while VxLAN offloads are not enabled.
4400 *
4401 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4402 * adds more than one port, disable offloads and don't re-enable them again
4403 * until after all the tunnels are removed.
4404 */
4405static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4406 __be16 port)
4407{
4408 struct be_adapter *adapter = netdev_priv(netdev);
4409 struct device *dev = &adapter->pdev->dev;
4410 int status;
4411
4412 if (lancer_chip(adapter) || BEx_chip(adapter))
4413 return;
4414
4415 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4416 dev_info(dev,
4417 "Only one UDP port supported for VxLAN offloads\n");
4418 dev_info(dev, "Disabling VxLAN offloads\n");
4419 adapter->vxlan_port_count++;
4420 goto err;
4421 }
4422
4423 if (adapter->vxlan_port_count++ >= 1)
4424 return;
4425
4426 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4427 OP_CONVERT_NORMAL_TO_TUNNEL);
4428 if (status) {
4429 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4430 goto err;
4431 }
4432
4433 status = be_cmd_set_vxlan_port(adapter, port);
4434 if (status) {
4435 dev_warn(dev, "Failed to add VxLAN port\n");
4436 goto err;
4437 }
4438 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4439 adapter->vxlan_port = port;
4440
4441 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4442 NETIF_F_TSO | NETIF_F_TSO6 |
4443 NETIF_F_GSO_UDP_TUNNEL;
4444 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4445 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4446
4447 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4448 be16_to_cpu(port));
4449 return;
4450err:
4451 be_disable_vxlan_offloads(adapter);
4452}
4453
4454static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4455 __be16 port)
4456{
4457 struct be_adapter *adapter = netdev_priv(netdev);
4458
4459 if (lancer_chip(adapter) || BEx_chip(adapter))
4460 return;
4461
4462 if (adapter->vxlan_port != port)
630f4b70 4463 goto done;
4464
4465 be_disable_vxlan_offloads(adapter);
4466
4467 dev_info(&adapter->pdev->dev,
4468 "Disabled VxLAN offloads for UDP port %d\n",
4469 be16_to_cpu(port));
4470done:
4471 adapter->vxlan_port_count--;
c9c47142 4472}
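
/* An illustrative, standalone sketch (not driver code) of the
 * vxlan_port_count bookkeeping used by the two callbacks above: offloads
 * are programmed only for the first port added, are torn down as soon as
 * a second port appears, and the counter keeps add/del calls balanced
 * while offloads stay off.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct vxlan_tracker {
	int count;
	bool offloads_on;
	uint16_t port;
};

/* returns true when offloads should be programmed for this port */
static bool vxlan_track_add(struct vxlan_tracker *t, uint16_t port)
{
	if (t->offloads_on) {		/* second port: disable offloads */
		t->count++;
		t->offloads_on = false;
		return false;
	}
	if (t->count++ >= 1)		/* still multiple ports: stay off */
		return false;
	t->offloads_on = true;		/* first port: enable offloads */
	t->port = port;
	return true;
}
#endif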
725d548f 4473
4474static netdev_features_t be_features_check(struct sk_buff *skb,
4475 struct net_device *dev,
4476 netdev_features_t features)
725d548f 4477{
5f35227e 4478 return vxlan_features_check(skb, features);
725d548f 4479}
c5abe7c0 4480#endif
c9c47142 4481
e5686ad8 4482static const struct net_device_ops be_netdev_ops = {
4483 .ndo_open = be_open,
4484 .ndo_stop = be_close,
4485 .ndo_start_xmit = be_xmit,
a54769f5 4486 .ndo_set_rx_mode = be_set_rx_mode,
4487 .ndo_set_mac_address = be_mac_addr_set,
4488 .ndo_change_mtu = be_change_mtu,
ab1594e9 4489 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4490 .ndo_validate_addr = eth_validate_addr,
4491 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4492 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4493 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4494 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4495 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4496 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4497 .ndo_set_vf_link_state = be_set_vf_link_state,
4498#ifdef CONFIG_NET_POLL_CONTROLLER
4499 .ndo_poll_controller = be_netpoll,
4500#endif
4501 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4502 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4503#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4504 .ndo_busy_poll = be_busy_poll,
6384a4d0 4505#endif
c5abe7c0 4506#ifdef CONFIG_BE2NET_VXLAN
4507 .ndo_add_vxlan_port = be_add_vxlan_port,
4508 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4509 .ndo_features_check = be_features_check,
c5abe7c0 4510#endif
4511};
4512
4513static void be_netdev_init(struct net_device *netdev)
4514{
4515 struct be_adapter *adapter = netdev_priv(netdev);
4516
6332c8d3 4517 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4518 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4519 NETIF_F_HW_VLAN_CTAG_TX;
4520 if (be_multi_rxq(adapter))
4521 netdev->hw_features |= NETIF_F_RXHASH;
4522
4523 netdev->features |= netdev->hw_features |
f646968f 4524 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4525
eb8a50d9 4526 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4527 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4528
4529 netdev->priv_flags |= IFF_UNICAST_FLT;
4530
4531 netdev->flags |= IFF_MULTICAST;
4532
b7e5887e 4533 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4534
10ef9ab4 4535 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4536
7ad24ea4 4537 netdev->ethtool_ops = &be_ethtool_ops;
4538}
4539
4540static void be_unmap_pci_bars(struct be_adapter *adapter)
4541{
4542 if (adapter->csr)
4543 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4544 if (adapter->db)
ce66f781 4545 pci_iounmap(adapter->pdev, adapter->db);
4546}
4547
4548static int db_bar(struct be_adapter *adapter)
4549{
4550 if (lancer_chip(adapter) || !be_physfn(adapter))
4551 return 0;
4552 else
4553 return 4;
4554}
4555
4556static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4557{
dbf0f2a7 4558 if (skyhawk_chip(adapter)) {
4559 adapter->roce_db.size = 4096;
4560 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4561 db_bar(adapter));
4562 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4563 db_bar(adapter));
4564 }
045508a8 4565 return 0;
4566}
4567
4568static int be_map_pci_bars(struct be_adapter *adapter)
4569{
4570 u8 __iomem *addr;
fe6d2a38 4571
4572 if (BEx_chip(adapter) && be_physfn(adapter)) {
4573 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4574 if (!adapter->csr)
4575 return -ENOMEM;
4576 }
4577
ce66f781 4578 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4579 if (!addr)
6b7c5b94 4580 goto pci_map_err;
ba343c77 4581 adapter->db = addr;
4582
4583 be_roce_map_pci_bars(adapter);
6b7c5b94 4584 return 0;
ce66f781 4585
6b7c5b94 4586pci_map_err:
acbafeb1 4587 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
4588 be_unmap_pci_bars(adapter);
4589 return -ENOMEM;
4590}
4591
4592static void be_ctrl_cleanup(struct be_adapter *adapter)
4593{
8788fdc2 4594 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4595
4596 be_unmap_pci_bars(adapter);
4597
4598 if (mem->va)
4599 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4600 mem->dma);
e7b909a6 4601
5b8821b7 4602 mem = &adapter->rx_filter;
e7b909a6 4603 if (mem->va)
4604 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4605 mem->dma);
4606}
4607
4608static int be_ctrl_init(struct be_adapter *adapter)
4609{
4610 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4611 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4612 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4613 u32 sli_intf;
6b7c5b94 4614 int status;
6b7c5b94 4615
4616 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4617 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4618 SLI_INTF_FAMILY_SHIFT;
4619 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4620
4621 status = be_map_pci_bars(adapter);
4622 if (status)
e7b909a6 4623 goto done;
4624
4625 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4626 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4627 mbox_mem_alloc->size,
4628 &mbox_mem_alloc->dma,
4629 GFP_KERNEL);
6b7c5b94 4630 if (!mbox_mem_alloc->va) {
4631 status = -ENOMEM;
4632 goto unmap_pci_bars;
4633 }
4634 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4635 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4636 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4637 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4638
5b8821b7 4639 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4640 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4641 rx_filter->size, &rx_filter->dma,
4642 GFP_KERNEL);
ddf1169f 4643 if (!rx_filter->va) {
4644 status = -ENOMEM;
4645 goto free_mbox;
4646 }
1f9061d2 4647
2984961c 4648 mutex_init(&adapter->mbox_lock);
4649 spin_lock_init(&adapter->mcc_lock);
4650 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4651
5eeff635 4652 init_completion(&adapter->et_cmd_compl);
cf588477 4653 pci_save_state(adapter->pdev);
6b7c5b94 4654 return 0;
4655
4656free_mbox:
4657 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4658 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4659
4660unmap_pci_bars:
4661 be_unmap_pci_bars(adapter);
4662
4663done:
4664 return status;
4665}
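
/* An illustrative sketch (not driver code): the mailbox setup above
 * over-allocates by 16 bytes and then aligns. PTR_ALIGN rounds a pointer
 * up to the next multiple of the alignment, equivalent to:
 */
#if 0
#include <stdint.h>

static inline void *ptr_align16(void *p)
{
	return (void *)(((uintptr_t)p + 15) & ~(uintptr_t)15);
}
#endif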
4666
4667static void be_stats_cleanup(struct be_adapter *adapter)
4668{
3abcdeda 4669 struct be_dma_mem *cmd = &adapter->stats_cmd;
4670
4671 if (cmd->va)
4672 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4673 cmd->va, cmd->dma);
4674}
4675
4676static int be_stats_init(struct be_adapter *adapter)
4677{
3abcdeda 4678 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4679
4680 if (lancer_chip(adapter))
4681 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4682 else if (BE2_chip(adapter))
89a88ab8 4683 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4684 else if (BE3_chip(adapter))
ca34fe38 4685 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4686 else
4687 /* ALL non-BE ASICs */
4688 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4689
4690 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4691 GFP_KERNEL);
ddf1169f 4692 if (!cmd->va)
6b568689 4693 return -ENOMEM;
4694 return 0;
4695}
4696
3bc6b06c 4697static void be_remove(struct pci_dev *pdev)
4698{
4699 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4700
4701 if (!adapter)
4702 return;
4703
045508a8 4704 be_roce_dev_remove(adapter);
8cef7a78 4705 be_intr_set(adapter, false);
045508a8 4706
4707 cancel_delayed_work_sync(&adapter->func_recovery_work);
4708
4709 unregister_netdev(adapter->netdev);
4710
4711 be_clear(adapter);
4712
4713 /* tell fw we're done with firing cmds */
4714 be_cmd_fw_clean(adapter);
4715
4716 be_stats_cleanup(adapter);
4717
4718 be_ctrl_cleanup(adapter);
4719
4720 pci_disable_pcie_error_reporting(pdev);
4721
4722 pci_release_regions(pdev);
4723 pci_disable_device(pdev);
4724
4725 free_netdev(adapter->netdev);
4726}
4727
39f1d94d 4728static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4729{
baaa08d1 4730 int status, level;
6b7c5b94 4731
4732 status = be_cmd_get_cntl_attributes(adapter);
4733 if (status)
4734 return status;
4735
4736 /* Must be a power of 2 or else MODULO will BUG_ON */
4737 adapter->be_get_temp_freq = 64;
4738
4739 if (BEx_chip(adapter)) {
4740 level = be_cmd_get_fw_log_level(adapter);
4741 adapter->msg_enable =
4742 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4743 }
941a77d5 4744
92bf14ab 4745 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4746 return 0;
4747}
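
/* An illustrative sketch (not driver code): the driver requires
 * be_get_temp_freq to be a power of two (see the comment above); for such
 * divisors the worker's modulo reduces to a single mask operation.
 */
#if 0
#include <stdint.h>

static inline int is_pow2(uint32_t n)
{
	return n && !(n & (n - 1));
}

static inline uint32_t mod_pow2(uint32_t x, uint32_t n)
{
	return x & (n - 1);	/* valid only when is_pow2(n) */
}
#endif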
4748
f67ef7ba 4749static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4750{
01e5b2c4 4751 struct device *dev = &adapter->pdev->dev;
d8110f62 4752 int status;
d8110f62 4753
4754 status = lancer_test_and_set_rdy_state(adapter);
4755 if (status)
4756 goto err;
d8110f62 4757
4758 if (netif_running(adapter->netdev))
4759 be_close(adapter->netdev);
d8110f62 4760
4761 be_clear(adapter);
4762
01e5b2c4 4763 be_clear_all_error(adapter);
4764
4765 status = be_setup(adapter);
4766 if (status)
4767 goto err;
d8110f62 4768
4769 if (netif_running(adapter->netdev)) {
4770 status = be_open(adapter->netdev);
d8110f62
PR
4771 if (status)
4772 goto err;
f67ef7ba 4773 }
d8110f62 4774
4bebb56a 4775 dev_err(dev, "Adapter recovery successful\n");
4776 return 0;
4777err:
4778 if (status == -EAGAIN)
4779 dev_err(dev, "Waiting for resource provisioning\n");
4780 else
4bebb56a 4781 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4782
4783 return status;
4784}
4785
4786static void be_func_recovery_task(struct work_struct *work)
4787{
4788 struct be_adapter *adapter =
4789 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4790 int status = 0;
d8110f62 4791
f67ef7ba 4792 be_detect_error(adapter);
d8110f62 4793
f67ef7ba 4794 if (adapter->hw_error && lancer_chip(adapter)) {
4795 rtnl_lock();
4796 netif_device_detach(adapter->netdev);
4797 rtnl_unlock();
d8110f62 4798
f67ef7ba 4799 status = lancer_recover_func(adapter);
4800 if (!status)
4801 netif_device_attach(adapter->netdev);
d8110f62 4802 }
f67ef7ba 4803
4804 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4805 * no need to attempt further recovery.
4806 */
4807 if (!status || status == -EAGAIN)
4808 schedule_delayed_work(&adapter->func_recovery_work,
4809 msecs_to_jiffies(1000));
4810}
4811
4812static void be_worker(struct work_struct *work)
4813{
4814 struct be_adapter *adapter =
4815 container_of(work, struct be_adapter, work.work);
4816 struct be_rx_obj *rxo;
4817 int i;
4818
4819 /* When interrupts are not yet enabled, just reap any pending
4820 * MCC completions */
4821 if (!netif_running(adapter->netdev)) {
072a9c48 4822 local_bh_disable();
10ef9ab4 4823 be_process_mcc(adapter);
072a9c48 4824 local_bh_enable();
4825 goto reschedule;
4826 }
4827
4828 if (!adapter->stats_cmd_sent) {
4829 if (lancer_chip(adapter))
4830 lancer_cmd_get_pport_stats(adapter,
cd3307aa 4831 &adapter->stats_cmd);
4832 else
4833 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4834 }
4835
4836 if (be_physfn(adapter) &&
4837 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4838 be_cmd_get_die_temperature(adapter);
4839
d8110f62 4840 for_all_rx_queues(adapter, rxo, i) {
4841 /* Replenish RX-queues starved due to memory
4842 * allocation failures.
4843 */
4844 if (rxo->rx_post_starved)
c30d7266 4845 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
4846 }
4847
2632bafd 4848 be_eqd_update(adapter);
10ef9ab4 4849
4850reschedule:
4851 adapter->work_counter++;
4852 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4853}
4854
257a3feb 4855/* If any VFs are already enabled don't FLR the PF */
4856static bool be_reset_required(struct be_adapter *adapter)
4857{
257a3feb 4858 return pci_num_vf(adapter->pdev) ? false : true;
4859}
4860
4861static char *mc_name(struct be_adapter *adapter)
4862{
4863 char *str = ""; /* default */
4864
4865 switch (adapter->mc_type) {
4866 case UMC:
4867 str = "UMC";
4868 break;
4869 case FLEX10:
4870 str = "FLEX10";
4871 break;
4872 case vNIC1:
4873 str = "vNIC-1";
4874 break;
4875 case nPAR:
4876 str = "nPAR";
4877 break;
4878 case UFP:
4879 str = "UFP";
4880 break;
4881 case vNIC2:
4882 str = "vNIC-2";
4883 break;
4884 default:
4885 str = "";
4886 }
4887
4888 return str;
4889}
4890
4891static inline char *func_name(struct be_adapter *adapter)
4892{
4893 return be_physfn(adapter) ? "PF" : "VF";
4894}
4895
1dd06ae8 4896static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4897{
4898 int status = 0;
4899 struct be_adapter *adapter;
4900 struct net_device *netdev;
b4e32a71 4901 char port_name;
6b7c5b94 4902
4903 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
4904
4905 status = pci_enable_device(pdev);
4906 if (status)
4907 goto do_none;
4908
4909 status = pci_request_regions(pdev, DRV_NAME);
4910 if (status)
4911 goto disable_dev;
4912 pci_set_master(pdev);
4913
7f640062 4914 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4915 if (!netdev) {
4916 status = -ENOMEM;
4917 goto rel_reg;
4918 }
4919 adapter = netdev_priv(netdev);
4920 adapter->pdev = pdev;
4921 pci_set_drvdata(pdev, adapter);
4922 adapter->netdev = netdev;
2243e2e9 4923 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4924
4c15c243 4925 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4926 if (!status) {
4927 netdev->features |= NETIF_F_HIGHDMA;
4928 } else {
4c15c243 4929 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4930 if (status) {
4931 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4932 goto free_netdev;
4933 }
4934 }
4935
4936 status = pci_enable_pcie_error_reporting(pdev);
4937 if (!status)
4938 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 4939
4940 status = be_ctrl_init(adapter);
4941 if (status)
39f1d94d 4942 goto free_netdev;
6b7c5b94 4943
2243e2e9 4944 /* sync up with fw's ready state */
ba343c77 4945 if (be_physfn(adapter)) {
bf99e50d 4946 status = be_fw_wait_ready(adapter);
4947 if (status)
4948 goto ctrl_clean;
ba343c77 4949 }
6b7c5b94 4950
4951 if (be_reset_required(adapter)) {
4952 status = be_cmd_reset_function(adapter);
4953 if (status)
4954 goto ctrl_clean;
556ae191 4955
4956 /* Wait for interrupts to quiesce after an FLR */
4957 msleep(100);
4958 }
4959
4960 /* Allow interrupts for other ULPs running on NIC function */
4961 be_intr_set(adapter, true);
10ef9ab4 4962
4963 /* tell fw we're ready to fire cmds */
4964 status = be_cmd_fw_init(adapter);
4965 if (status)
4966 goto ctrl_clean;
4967
4968 status = be_stats_init(adapter);
4969 if (status)
4970 goto ctrl_clean;
4971
39f1d94d 4972 status = be_get_initial_config(adapter);
4973 if (status)
4974 goto stats_clean;
4975
4976 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4977 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4978 adapter->rx_fc = true;
4979 adapter->tx_fc = true;
6b7c5b94 4980
4981 status = be_setup(adapter);
4982 if (status)
55f5c3c5 4983 goto stats_clean;
2243e2e9 4984
3abcdeda 4985 be_netdev_init(netdev);
4986 status = register_netdev(netdev);
4987 if (status != 0)
5fb379ee 4988 goto unsetup;
6b7c5b94 4989
4990 be_roce_dev_add(adapter);
4991
4992 schedule_delayed_work(&adapter->func_recovery_work,
4993 msecs_to_jiffies(1000));
4994
4995 be_cmd_query_port_name(adapter, &port_name);
4996
4997 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4998 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4999
5000 return 0;
5001
5002unsetup:
5003 be_clear(adapter);
5004stats_clean:
5005 be_stats_cleanup(adapter);
5006ctrl_clean:
5007 be_ctrl_cleanup(adapter);
f9449ab7 5008free_netdev:
fe6d2a38 5009 free_netdev(netdev);
5010rel_reg:
5011 pci_release_regions(pdev);
5012disable_dev:
5013 pci_disable_device(pdev);
5014do_none:
c4ca2374 5015 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5016 return status;
5017}
5018
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

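/* Resume mirrors probe's bring-up order: enable the device, return to D0
 * and restore config space, wait for the FW to report ready, tell the FW
 * we are ready to issue commands (be_cmd_fw_init()), and only then
 * rebuild the function with be_setup().
 */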
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

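/* EEH (PCI error) recovery callbacks. The PCI core drives recovery in
 * stages: error_detected() quiesces the function and requests a slot
 * reset, slot_reset() re-initializes the device and reports whether it
 * recovered, and resume() restarts traffic. See
 * Documentation/PCI/pci-error-recovery.txt for the full contract.
 */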
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

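/* Note: this driver uses the legacy .suspend/.resume hooks of struct
 * pci_driver rather than a dev_pm_ops structure, so the PCI core passes
 * the pm_message_t state directly to be_suspend().
 */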
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
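
/* Usage note (not part of the original source): the rx_frag_size check
 * above means an out-of-range value is not rejected at load time; the
 * driver warns and falls back to the 2048-byte default. For example,
 * assuming DRV_NAME resolves to "be2net":
 *
 *	modprobe be2net rx_frag_size=4096
 */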
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);