drivers/net/ethernet/emulex/benet/be_main.c
(as of commit "be2net: Fix skb double free in be_xmit_workarounds() failure path")
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

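/* be_queue_free()/be_queue_alloc() below manage the DMA-coherent ring
 * memory (len * entry_size bytes) that backs each queue in the driver.
 */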
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

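/* Toggle the host-interrupt enable bit (HOSTINTR) in the MEMBAR control
 * register, reached through PCI config space; a no-op when the bit is
 * already in the requested state.
 */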
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

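/* Interrupts are preferably toggled via the SET_INTERRUPT_ENABLE FW cmd;
 * if that cmd fails (e.g. unsupported by older FW), fall back to the
 * direct MEMBAR register write above.
 */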
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

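/* The be_*_notify() helpers below ring the RQ/TXQ/EQ/CQ doorbells. In
 * the RQ/TXQ variants a wmb() orders the descriptor writes ahead of the
 * MMIO doorbell write, so HW never sees a posted entry before its
 * contents are visible.
 */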
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

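/* ndo_set_mac_address handler: the new MAC is programmed with PMAC_ADD
 * before the old pmac_id is deleted, and netdev->dev_addr is updated
 * only after querying the FW for the currently active MAC; see the
 * inline comments for the VF-privilege corner cases.
 */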
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

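/* The populate_be_vN_stats() variants below copy the version-specific
 * GET_STATS response (byte-swapped first via be_dws_le_to_cpu) into the
 * common struct be_drv_stats, insulating the rest of the driver from
 * the per-chip stats layouts.
 */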
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

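/* Fold a 16-bit HW counter into a 32-bit SW accumulator: a new reading
 * smaller than the accumulator's low word means the HW counter wrapped,
 * so 65536 is added before the merged value is stored.
 */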
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

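/* ndo_get_stats64 handler: per-queue SW counters are sampled inside
 * u64_stats fetch/retry loops (so 64-bit reads stay consistent even on
 * 32-bit hosts) and the error totals are derived from the HW stats
 * parsed by be_parse_stats() above.
 */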
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

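/* Pick the vlan tag for the Tx WRB: if the skb's priority is not in the
 * priority bitmap allowed by FW, the FW-recommended priority is
 * substituted into the tag.
 */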
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			adapter->recommended_prio;

	return vlan_tag;
}

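/* Build the header WRB that precedes the fragment WRBs: LSO/checksum
 * offload flags, optional vlan tag, total WRB count and payload length
 * are all encoded here via AMAP_SET_BITS. Note the skip_hw_vlan quirk:
 * event=1 with complete=0 tells HW not to insert the vlan tag.
 */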
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

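/* DMA-map the skb head and frags and post one WRB per mapped piece,
 * preceded by the header WRB (and followed by a dummy WRB when needed
 * to even out the count). On a mapping failure the queue head is
 * rewound and every WRB posted so far is unmapped, so the caller sees
 * copied == 0 and frees the skb exactly once.
 */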
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

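/* Insert the vlan tag (and the outer qnq tag, if any) in software.
 * __vlan_put_tag() consumes the skb on failure, so a NULL return here
 * means the skb is already freed and callers must not free it again.
 */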
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

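/* Apply the HW/FW bug workarounds to an skb before it is queued.
 * Returns NULL when the skb cannot be sent. Note the two exit labels:
 * tx_drop frees the skb here, while err is used on paths (skb_padto()
 * or be_insert_vlan_in_pkt() failures) where the skb was already freed
 * by the callee; this is the double-free fix named in the commit title
 * above.
 */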
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto err;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

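/* ndo_start_xmit handler: run the workarounds, post the WRBs, and stop
 * the subqueue *before* ringing the doorbell when the ring is close to
 * full, so the Tx completions of this transmit can safely wake it up.
 */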
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 0);

	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		adapter->vlan_tag[vid] = 0;
	}
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	status = be_vid_config(adapter);
	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (!status)
		vf_cfg->vlan_tag = vlan;
	else
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

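/* Adaptive interrupt coalescing: be_eqd_update() derives a pkts/sec
 * rate from the rx/tx deltas of each event queue, maps it to an EQ
 * delay (clamped to the configured min/max), and pushes all changed
 * delays to FW in a single MODIFY_EQ_DELAY cmd.
 */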
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

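/* Pop the page_info at the rxq tail for the frag just consumed. The
 * backing (compound) page is shared by several frags and is only
 * DMA-unmapped when its last user is reaped.
 */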
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

5be93b9a 1588/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1589static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1590 struct napi_struct *napi,
1591 struct be_rx_compl_info *rxcp)
6b7c5b94 1592{
10ef9ab4 1593 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1594 struct be_rx_page_info *page_info;
5be93b9a 1595 struct sk_buff *skb = NULL;
2e588f84
SP
1596 u16 remaining, curr_frag_len;
1597 u16 i, j;
3968fa1e 1598
10ef9ab4 1599 skb = napi_get_frags(napi);
5be93b9a 1600 if (!skb) {
10ef9ab4 1601 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1602 return;
1603 }
1604
2e588f84
SP
1605 remaining = rxcp->pkt_size;
1606 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1607 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1608
1609 curr_frag_len = min(remaining, rx_frag_size);
1610
bd46cb6c
AK
1611 /* Coalesce all frags from the same physical page in one slot */
1612 if (i == 0 || page_info->page_offset == 0) {
1613 /* First frag or Fresh page */
1614 j++;
b061b39e 1615 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1616 skb_shinfo(skb)->frags[j].page_offset =
1617 page_info->page_offset;
9e903e08 1618 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1619 } else {
1620 put_page(page_info->page);
1621 }
9e903e08 1622 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1623 skb->truesize += rx_frag_size;
bd46cb6c 1624 remaining -= curr_frag_len;
6b7c5b94
SP
1625 memset(page_info, 0, sizeof(*page_info));
1626 }
bd46cb6c 1627 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1628
5be93b9a 1629 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1630 skb->len = rxcp->pkt_size;
1631 skb->data_len = rxcp->pkt_size;
5be93b9a 1632 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1633 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1634 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1635 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
6384a4d0 1636 skb_mark_napi_id(skb, napi);
5be93b9a 1637
343e43c0 1638 if (rxcp->vlanf)
86a9bad3 1639 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1640
10ef9ab4 1641 napi_gro_frags(napi);
2e588f84
SP
1642}
1643
10ef9ab4
SP
1644static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1645 struct be_rx_compl_info *rxcp)
2e588f84
SP
1646{
1647 rxcp->pkt_size =
1648 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1649 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1650 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1651 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1652 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1653 rxcp->ip_csum =
1654 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1655 rxcp->l4_csum =
1656 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1657 rxcp->ipv6 =
1658 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1659 rxcp->num_rcvd =
1660 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1661 rxcp->pkt_type =
1662 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1663 rxcp->rss_hash =
c297977e 1664 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1665 if (rxcp->vlanf) {
1666 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1667 compl);
1668 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1669 compl);
15d72184 1670 }
12004ae9 1671 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1672}
1673
10ef9ab4
SP
1674static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1675 struct be_rx_compl_info *rxcp)
2e588f84
SP
1676{
1677 rxcp->pkt_size =
1678 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1679 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1680 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1681 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1682 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1683 rxcp->ip_csum =
1684 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1685 rxcp->l4_csum =
1686 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1687 rxcp->ipv6 =
1688 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1689 rxcp->num_rcvd =
1690 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1691 rxcp->pkt_type =
1692 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1693 rxcp->rss_hash =
c297977e 1694 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1695 if (rxcp->vlanf) {
1696 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1697 compl);
1698 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1699 compl);
15d72184 1700 }
12004ae9 1701 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1702 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1703 ip_frag, compl);
2e588f84
SP
1704}
1705
1706static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1707{
1708 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1709 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1710 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1711
2e588f84
SP
 1712 /* For checking the valid bit it is OK to use either definition as the
 1713 * valid bit is at the same position in both v0 and v1 Rx compl */
1714 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1715 return NULL;
6b7c5b94 1716
2e588f84
SP
1717 rmb();
1718 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1719
2e588f84 1720 if (adapter->be3_native)
10ef9ab4 1721 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1722 else
10ef9ab4 1723 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1724
e38b1706
SK
1725 if (rxcp->ip_frag)
1726 rxcp->l4_csum = 0;
1727
15d72184
SP
1728 if (rxcp->vlanf) {
 1729 /* vlanf could be wrongly set in some cards;
 1730 * ignore it if vtm is not set */
752961a1 1731 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1732 rxcp->vlanf = 0;
6b7c5b94 1733
15d72184 1734 if (!lancer_chip(adapter))
3c709f8f 1735 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1736
939cf306 1737 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1738 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1739 rxcp->vlanf = 0;
1740 }
2e588f84
SP
1741
 1742 /* As the compl has been parsed, reset it; we won't touch it again */
1743 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1744
3abcdeda 1745 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1746 return rxcp;
1747}
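/* Note: be_rx_compl_get() checks the DMA'd valid bit first and only then
 * issues rmb(), so the CPU cannot read (and parse) the rest of the
 * completion entry before the valid bit has been observed as set.
 */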
1748
1829b086 1749static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1750{
6b7c5b94 1751 u32 order = get_order(size);
1829b086 1752
6b7c5b94 1753 if (order > 0)
1829b086
ED
1754 gfp |= __GFP_COMP;
1755 return alloc_pages(gfp, order);
6b7c5b94
SP
1756}
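/* e.g. with adapter->big_page_size = 16K on a 4K-page system,
 * get_order(16384) = 2 and a compound page of four physical pages is
 * allocated; __GFP_COMP keeps get_page()/put_page() refcounting on the
 * head page correct when the page is later split into rx frags.
 */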
1757
1758/*
 1759 * Allocate a page, split it into fragments of size rx_frag_size and post
 1760 * them as receive buffers to BE
1761 */
1829b086 1762static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1763{
3abcdeda 1764 struct be_adapter *adapter = rxo->adapter;
26d92f92 1765 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1766 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1767 struct page *pagep = NULL;
ba42fad0 1768 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1769 struct be_eth_rx_d *rxd;
1770 u64 page_dmaaddr = 0, frag_dmaaddr;
1771 u32 posted, page_offset = 0;
1772
3abcdeda 1773 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1774 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1775 if (!pagep) {
1829b086 1776 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1777 if (unlikely(!pagep)) {
ac124ff9 1778 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1779 break;
1780 }
ba42fad0
IV
1781 page_dmaaddr = dma_map_page(dev, pagep, 0,
1782 adapter->big_page_size,
2b7bcebf 1783 DMA_FROM_DEVICE);
ba42fad0
IV
1784 if (dma_mapping_error(dev, page_dmaaddr)) {
1785 put_page(pagep);
1786 pagep = NULL;
1787 rx_stats(rxo)->rx_post_fail++;
1788 break;
1789 }
6b7c5b94
SP
1790 page_info->page_offset = 0;
1791 } else {
1792 get_page(pagep);
1793 page_info->page_offset = page_offset + rx_frag_size;
1794 }
1795 page_offset = page_info->page_offset;
1796 page_info->page = pagep;
fac6da5b 1797 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1798 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1799
1800 rxd = queue_head_node(rxq);
1801 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1802 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1803
1804 /* Any space left in the current big page for another frag? */
1805 if ((page_offset + rx_frag_size + rx_frag_size) >
1806 adapter->big_page_size) {
1807 pagep = NULL;
1808 page_info->last_page_user = true;
1809 }
26d92f92
SP
1810
1811 prev_page_info = page_info;
1812 queue_head_inc(rxq);
10ef9ab4 1813 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1814 }
1815 if (pagep)
26d92f92 1816 prev_page_info->last_page_user = true;
6b7c5b94
SP
1817
1818 if (posted) {
6b7c5b94 1819 atomic_add(posted, &rxq->used);
6384a4d0
SP
1820 if (rxo->rx_post_starved)
1821 rxo->rx_post_starved = false;
8788fdc2 1822 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1823 } else if (atomic_read(&rxq->used) == 0) {
1824 /* Let be_worker replenish when memory is available */
3abcdeda 1825 rxo->rx_post_starved = true;
6b7c5b94 1826 }
6b7c5b94
SP
1827}
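/* Illustration (assuming rx_frag_size = 2048 and big_page_size = 4K):
 * each page yields two frags, at page_offset 0 and 2048; the second one
 * is marked last_page_user so the driver can tear down the page's DMA
 * mapping only after both frags have been consumed.
 */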
1828
5fb379ee 1829static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1830{
6b7c5b94
SP
1831 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1832
1833 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1834 return NULL;
1835
f3eb62d2 1836 rmb();
6b7c5b94
SP
1837 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1838
1839 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1840
1841 queue_tail_inc(tx_cq);
1842 return txcp;
1843}
1844
3c8def97
SP
1845static u16 be_tx_compl_process(struct be_adapter *adapter,
1846 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1847{
3c8def97 1848 struct be_queue_info *txq = &txo->q;
a73b796e 1849 struct be_eth_wrb *wrb;
3c8def97 1850 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1851 struct sk_buff *sent_skb;
ec43b1a6
SP
1852 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1853 bool unmap_skb_hdr = true;
6b7c5b94 1854
ec43b1a6 1855 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1856 BUG_ON(!sent_skb);
ec43b1a6
SP
1857 sent_skbs[txq->tail] = NULL;
1858
1859 /* skip header wrb */
a73b796e 1860 queue_tail_inc(txq);
6b7c5b94 1861
ec43b1a6 1862 do {
6b7c5b94 1863 cur_index = txq->tail;
a73b796e 1864 wrb = queue_tail_node(txq);
2b7bcebf
IV
1865 unmap_tx_frag(&adapter->pdev->dev, wrb,
1866 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1867 unmap_skb_hdr = false;
1868
6b7c5b94
SP
1869 num_wrbs++;
1870 queue_tail_inc(txq);
ec43b1a6 1871 } while (cur_index != last_index);
6b7c5b94 1872
6b7c5b94 1873 kfree_skb(sent_skb);
4d586b82 1874 return num_wrbs;
6b7c5b94
SP
1875}
1876
10ef9ab4
SP
1877/* Return the number of events in the event queue */
1878static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1879{
10ef9ab4
SP
1880 struct be_eq_entry *eqe;
1881 int num = 0;
859b1e4e 1882
10ef9ab4
SP
1883 do {
1884 eqe = queue_tail_node(&eqo->q);
1885 if (eqe->evt == 0)
1886 break;
859b1e4e 1887
10ef9ab4
SP
1888 rmb();
1889 eqe->evt = 0;
1890 num++;
1891 queue_tail_inc(&eqo->q);
1892 } while (true);
1893
1894 return num;
859b1e4e
SP
1895}
1896
10ef9ab4
SP
 1897 /* Leaves the EQ in a disarmed state */
1898static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1899{
10ef9ab4 1900 int num = events_get(eqo);
859b1e4e 1901
10ef9ab4 1902 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1903}
1904
10ef9ab4 1905static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1906{
1907 struct be_rx_page_info *page_info;
3abcdeda
SP
1908 struct be_queue_info *rxq = &rxo->q;
1909 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1910 struct be_rx_compl_info *rxcp;
d23e946c
SP
1911 struct be_adapter *adapter = rxo->adapter;
1912 int flush_wait = 0;
6b7c5b94 1913
d23e946c
SP
1914 /* Consume pending rx completions.
1915 * Wait for the flush completion (identified by zero num_rcvd)
1916 * to arrive. Notify CQ even when there are no more CQ entries
1917 * for HW to flush partially coalesced CQ entries.
1918 * In Lancer, there is no need to wait for flush compl.
1919 */
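/* (the flush wait below is bounded: after ~10 x 1ms without the flush
 * compl a warning is logged and cleanup proceeds anyway)
 */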
1920 for (;;) {
1921 rxcp = be_rx_compl_get(rxo);
1922 if (rxcp == NULL) {
1923 if (lancer_chip(adapter))
1924 break;
1925
1926 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1927 dev_warn(&adapter->pdev->dev,
1928 "did not receive flush compl\n");
1929 break;
1930 }
1931 be_cq_notify(adapter, rx_cq->id, true, 0);
1932 mdelay(1);
1933 } else {
1934 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1935 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1936 if (rxcp->num_rcvd == 0)
1937 break;
1938 }
6b7c5b94
SP
1939 }
1940
d23e946c
SP
1941 /* After cleanup, leave the CQ in unarmed state */
1942 be_cq_notify(adapter, rx_cq->id, false, 0);
1943
1944 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
1945 while (atomic_read(&rxq->used) > 0) {
1946 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1947 put_page(page_info->page);
1948 memset(page_info, 0, sizeof(*page_info));
1949 }
1950 BUG_ON(atomic_read(&rxq->used));
482c9e79 1951 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1952}
1953
0ae57bb3 1954static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1955{
0ae57bb3
SP
1956 struct be_tx_obj *txo;
1957 struct be_queue_info *txq;
a8e9179a 1958 struct be_eth_tx_compl *txcp;
4d586b82 1959 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1960 struct sk_buff *sent_skb;
1961 bool dummy_wrb;
0ae57bb3 1962 int i, pending_txqs;
a8e9179a
SP
1963
1964 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1965 do {
0ae57bb3
SP
1966 pending_txqs = adapter->num_tx_qs;
1967
1968 for_all_tx_queues(adapter, txo, i) {
1969 txq = &txo->q;
1970 while ((txcp = be_tx_compl_get(&txo->cq))) {
1971 end_idx =
1972 AMAP_GET_BITS(struct amap_eth_tx_compl,
1973 wrb_index, txcp);
1974 num_wrbs += be_tx_compl_process(adapter, txo,
1975 end_idx);
1976 cmpl++;
1977 }
1978 if (cmpl) {
1979 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1980 atomic_sub(num_wrbs, &txq->used);
1981 cmpl = 0;
1982 num_wrbs = 0;
1983 }
1984 if (atomic_read(&txq->used) == 0)
1985 pending_txqs--;
a8e9179a
SP
1986 }
1987
0ae57bb3 1988 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1989 break;
1990
1991 mdelay(1);
1992 } while (true);
1993
0ae57bb3
SP
1994 for_all_tx_queues(adapter, txo, i) {
1995 txq = &txo->q;
1996 if (atomic_read(&txq->used))
1997 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1998 atomic_read(&txq->used));
1999
2000 /* free posted tx for which compls will never arrive */
2001 while (atomic_read(&txq->used)) {
2002 sent_skb = txo->sent_skb_list[txq->tail];
2003 end_idx = txq->tail;
2004 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2005 &dummy_wrb);
2006 index_adv(&end_idx, num_wrbs - 1, txq->len);
2007 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2008 atomic_sub(num_wrbs, &txq->used);
2009 }
b03388d6 2010 }
6b7c5b94
SP
2011}
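/* Note: the completion wait above is bounded at roughly 200 x 1ms; any
 * wrbs still posted after that are reclaimed without a completion.
 */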
2012
10ef9ab4
SP
2013static void be_evt_queues_destroy(struct be_adapter *adapter)
2014{
2015 struct be_eq_obj *eqo;
2016 int i;
2017
2018 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2019 if (eqo->q.created) {
2020 be_eq_clean(eqo);
10ef9ab4 2021 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2022 napi_hash_del(&eqo->napi);
68d7bdcb 2023 netif_napi_del(&eqo->napi);
19d59aa7 2024 }
10ef9ab4
SP
2025 be_queue_free(adapter, &eqo->q);
2026 }
2027}
2028
2029static int be_evt_queues_create(struct be_adapter *adapter)
2030{
2031 struct be_queue_info *eq;
2032 struct be_eq_obj *eqo;
2632bafd 2033 struct be_aic_obj *aic;
10ef9ab4
SP
2034 int i, rc;
2035
92bf14ab
SP
2036 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2037 adapter->cfg_num_qs);
10ef9ab4
SP
2038
2039 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2040 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2041 BE_NAPI_WEIGHT);
6384a4d0 2042 napi_hash_add(&eqo->napi);
2632bafd 2043 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2044 eqo->adapter = adapter;
2045 eqo->tx_budget = BE_TX_BUDGET;
2046 eqo->idx = i;
2632bafd
SP
2047 aic->max_eqd = BE_MAX_EQD;
2048 aic->enable = true;
10ef9ab4
SP
2049
2050 eq = &eqo->q;
2051 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2052 sizeof(struct be_eq_entry));
2053 if (rc)
2054 return rc;
2055
f2f781a7 2056 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2057 if (rc)
2058 return rc;
2059 }
1cfafab9 2060 return 0;
10ef9ab4
SP
2061}
2062
5fb379ee
SP
2063static void be_mcc_queues_destroy(struct be_adapter *adapter)
2064{
2065 struct be_queue_info *q;
5fb379ee 2066
8788fdc2 2067 q = &adapter->mcc_obj.q;
5fb379ee 2068 if (q->created)
8788fdc2 2069 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2070 be_queue_free(adapter, q);
2071
8788fdc2 2072 q = &adapter->mcc_obj.cq;
5fb379ee 2073 if (q->created)
8788fdc2 2074 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2075 be_queue_free(adapter, q);
2076}
2077
2078/* Must be called only after TX qs are created as MCC shares TX EQ */
2079static int be_mcc_queues_create(struct be_adapter *adapter)
2080{
2081 struct be_queue_info *q, *cq;
5fb379ee 2082
8788fdc2 2083 cq = &adapter->mcc_obj.cq;
5fb379ee 2084 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 2085 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2086 goto err;
2087
10ef9ab4
SP
2088 /* Use the default EQ for MCC completions */
2089 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2090 goto mcc_cq_free;
2091
8788fdc2 2092 q = &adapter->mcc_obj.q;
5fb379ee
SP
2093 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2094 goto mcc_cq_destroy;
2095
8788fdc2 2096 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2097 goto mcc_q_free;
2098
2099 return 0;
2100
2101mcc_q_free:
2102 be_queue_free(adapter, q);
2103mcc_cq_destroy:
8788fdc2 2104 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2105mcc_cq_free:
2106 be_queue_free(adapter, cq);
2107err:
2108 return -1;
2109}
2110
6b7c5b94
SP
2111static void be_tx_queues_destroy(struct be_adapter *adapter)
2112{
2113 struct be_queue_info *q;
3c8def97
SP
2114 struct be_tx_obj *txo;
2115 u8 i;
6b7c5b94 2116
3c8def97
SP
2117 for_all_tx_queues(adapter, txo, i) {
2118 q = &txo->q;
2119 if (q->created)
2120 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2121 be_queue_free(adapter, q);
6b7c5b94 2122
3c8def97
SP
2123 q = &txo->cq;
2124 if (q->created)
2125 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2126 be_queue_free(adapter, q);
2127 }
6b7c5b94
SP
2128}
2129
7707133c 2130static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2131{
10ef9ab4 2132 struct be_queue_info *cq, *eq;
3c8def97 2133 struct be_tx_obj *txo;
92bf14ab 2134 int status, i;
6b7c5b94 2135
92bf14ab 2136 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2137
10ef9ab4
SP
2138 for_all_tx_queues(adapter, txo, i) {
2139 cq = &txo->cq;
2140 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2141 sizeof(struct be_eth_tx_compl));
2142 if (status)
2143 return status;
3c8def97 2144
827da44c
JS
2145 u64_stats_init(&txo->stats.sync);
2146 u64_stats_init(&txo->stats.sync_compl);
2147
10ef9ab4
SP
2148 /* If num_evt_qs is less than num_tx_qs, then more than
 2149 * one txq shares an eq
2150 */
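/* (e.g. with 8 TXQs on 4 EQs, txq i maps to eq i % 4, so TXQ0 and
 * TXQ4 share EQ0)
 */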
2151 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2152 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2153 if (status)
2154 return status;
6b7c5b94 2155
10ef9ab4
SP
2156 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2157 sizeof(struct be_eth_wrb));
2158 if (status)
2159 return status;
6b7c5b94 2160
94d73aaa 2161 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2162 if (status)
2163 return status;
3c8def97 2164 }
6b7c5b94 2165
d379142b
SP
2166 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2167 adapter->num_tx_qs);
10ef9ab4 2168 return 0;
6b7c5b94
SP
2169}
2170
10ef9ab4 2171static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2172{
2173 struct be_queue_info *q;
3abcdeda
SP
2174 struct be_rx_obj *rxo;
2175 int i;
2176
2177 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2178 q = &rxo->cq;
2179 if (q->created)
2180 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2181 be_queue_free(adapter, q);
ac6a0c4a
SP
2182 }
2183}
2184
10ef9ab4 2185static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2186{
10ef9ab4 2187 struct be_queue_info *eq, *cq;
3abcdeda
SP
2188 struct be_rx_obj *rxo;
2189 int rc, i;
6b7c5b94 2190
92bf14ab
SP
2191 /* We can create as many RSS rings as there are EQs. */
2192 adapter->num_rx_qs = adapter->num_evt_qs;
2193
 2194 /* We'll use RSS only if at least 2 RSS rings are supported.
2195 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2196 */
92bf14ab
SP
2197 if (adapter->num_rx_qs > 1)
2198 adapter->num_rx_qs++;
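/* e.g. 4 EQs give 4 RSS rings plus the default RXQ (num_rx_qs = 5);
 * with a single EQ, RSS is not used and only the default RXQ exists.
 */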
2199
6b7c5b94 2200 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2201 for_all_rx_queues(adapter, rxo, i) {
2202 rxo->adapter = adapter;
3abcdeda
SP
2203 cq = &rxo->cq;
2204 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2205 sizeof(struct be_eth_rx_compl));
2206 if (rc)
10ef9ab4 2207 return rc;
3abcdeda 2208
827da44c 2209 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2210 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2211 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2212 if (rc)
10ef9ab4 2213 return rc;
3abcdeda 2214 }
6b7c5b94 2215
d379142b
SP
2216 dev_info(&adapter->pdev->dev,
2217 "created %d RSS queue(s) and 1 default RX queue\n",
2218 adapter->num_rx_qs - 1);
10ef9ab4 2219 return 0;
b628bde2
SP
2220}
2221
6b7c5b94
SP
2222static irqreturn_t be_intx(int irq, void *dev)
2223{
e49cc34f
SP
2224 struct be_eq_obj *eqo = dev;
2225 struct be_adapter *adapter = eqo->adapter;
2226 int num_evts = 0;
6b7c5b94 2227
d0b9cec3
SP
2228 /* IRQ is not expected when NAPI is scheduled as the EQ
2229 * will not be armed.
2230 * But, this can happen on Lancer INTx where it takes
 2231 * a while to de-assert INTx or in BE2 where occasionally
2232 * an interrupt may be raised even when EQ is unarmed.
2233 * If NAPI is already scheduled, then counting & notifying
2234 * events will orphan them.
e49cc34f 2235 */
d0b9cec3 2236 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2237 num_evts = events_get(eqo);
d0b9cec3
SP
2238 __napi_schedule(&eqo->napi);
2239 if (num_evts)
2240 eqo->spurious_intr = 0;
2241 }
2242 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2243
d0b9cec3
SP
 2244 /* Return IRQ_HANDLED only for the first spurious intr
2245 * after a valid intr to stop the kernel from branding
2246 * this irq as a bad one!
e49cc34f 2247 */
d0b9cec3
SP
2248 if (num_evts || eqo->spurious_intr++ == 0)
2249 return IRQ_HANDLED;
2250 else
2251 return IRQ_NONE;
6b7c5b94
SP
2252}
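/* Example sequence: a valid INTx arrives (num_evts > 0, IRQ_HANDLED),
 * then one spurious one (num_evts == 0, spurious_intr becomes 1, still
 * IRQ_HANDLED); further back-to-back spurious interrupts return IRQ_NONE
 * until a valid one resets spurious_intr to 0.
 */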
2253
10ef9ab4 2254static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2255{
10ef9ab4 2256 struct be_eq_obj *eqo = dev;
6b7c5b94 2257
0b545a62
SP
2258 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2259 napi_schedule(&eqo->napi);
6b7c5b94
SP
2260 return IRQ_HANDLED;
2261}
2262
2e588f84 2263static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2264{
e38b1706 2265 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2266}
2267
10ef9ab4 2268static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
6384a4d0 2269 int budget, int polling)
6b7c5b94 2270{
3abcdeda
SP
2271 struct be_adapter *adapter = rxo->adapter;
2272 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2273 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2274 u32 work_done;
2275
2276 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2277 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2278 if (!rxcp)
2279 break;
2280
12004ae9
SP
2281 /* Is it a flush compl that has no data */
2282 if (unlikely(rxcp->num_rcvd == 0))
2283 goto loop_continue;
2284
 2285 /* Discard compl with partial DMA (Lancer B0) */
2286 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2287 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2288 goto loop_continue;
2289 }
2290
 2291 /* On BE, drop pkts that arrive due to imperfect filtering in
 2292 * promiscuous mode on some SKUs
2293 */
2294 if (unlikely(rxcp->port != adapter->port_num &&
2295 !lancer_chip(adapter))) {
10ef9ab4 2296 be_rx_compl_discard(rxo, rxcp);
12004ae9 2297 goto loop_continue;
64642811 2298 }
009dd872 2299
6384a4d0
SP
2300 /* Don't do gro when we're busy_polling */
2301 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2302 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2303 else
6384a4d0
SP
2304 be_rx_compl_process(rxo, napi, rxcp);
2305
12004ae9 2306loop_continue:
2e588f84 2307 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2308 }
2309
10ef9ab4
SP
2310 if (work_done) {
2311 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2312
6384a4d0
SP
2313 /* When an rx-obj gets into post_starved state, just
2314 * let be_worker do the posting.
2315 */
2316 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2317 !rxo->rx_post_starved)
10ef9ab4 2318 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2319 }
10ef9ab4 2320
6b7c5b94
SP
2321 return work_done;
2322}
2323
10ef9ab4
SP
2324static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2325 int budget, int idx)
6b7c5b94 2326{
6b7c5b94 2327 struct be_eth_tx_compl *txcp;
10ef9ab4 2328 int num_wrbs = 0, work_done;
3c8def97 2329
10ef9ab4
SP
2330 for (work_done = 0; work_done < budget; work_done++) {
2331 txcp = be_tx_compl_get(&txo->cq);
2332 if (!txcp)
2333 break;
2334 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2335 AMAP_GET_BITS(struct amap_eth_tx_compl,
2336 wrb_index, txcp));
10ef9ab4 2337 }
6b7c5b94 2338
10ef9ab4
SP
2339 if (work_done) {
2340 be_cq_notify(adapter, txo->cq.id, true, work_done);
2341 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2342
10ef9ab4
SP
2343 /* As Tx wrbs have been freed up, wake up netdev queue
2344 * if it was stopped due to lack of tx wrbs. */
2345 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2346 atomic_read(&txo->q.used) < txo->q.len / 2) {
2347 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2348 }
10ef9ab4
SP
2349
2350 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2351 tx_stats(txo)->tx_compl += work_done;
2352 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2353 }
10ef9ab4
SP
2354 return (work_done < budget); /* Done */
2355}
6b7c5b94 2356
68d7bdcb 2357int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2358{
2359 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2360 struct be_adapter *adapter = eqo->adapter;
0b545a62 2361 int max_work = 0, work, i, num_evts;
6384a4d0 2362 struct be_rx_obj *rxo;
10ef9ab4 2363 bool tx_done;
f31e50a8 2364
0b545a62
SP
2365 num_evts = events_get(eqo);
2366
10ef9ab4
SP
2367 /* Process all TXQs serviced by this EQ */
2368 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2369 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2370 eqo->tx_budget, i);
2371 if (!tx_done)
2372 max_work = budget;
f31e50a8
SP
2373 }
2374
6384a4d0
SP
2375 if (be_lock_napi(eqo)) {
2376 /* This loop will iterate twice for EQ0 in which
 2377 * completions of the last RXQ (default one) are also processed.
 2378 * For other EQs the loop iterates only once
2379 */
2380 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2381 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2382 max_work = max(work, max_work);
2383 }
2384 be_unlock_napi(eqo);
2385 } else {
2386 max_work = budget;
10ef9ab4 2387 }
6b7c5b94 2388
10ef9ab4
SP
2389 if (is_mcc_eqo(eqo))
2390 be_process_mcc(adapter);
93c86700 2391
10ef9ab4
SP
2392 if (max_work < budget) {
2393 napi_complete(napi);
0b545a62 2394 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2395 } else {
2396 /* As we'll continue in polling mode, count and clear events */
0b545a62 2397 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2398 }
10ef9ab4 2399 return max_work;
6b7c5b94
SP
2400}
2401
6384a4d0
SP
2402#ifdef CONFIG_NET_RX_BUSY_POLL
2403static int be_busy_poll(struct napi_struct *napi)
2404{
2405 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2406 struct be_adapter *adapter = eqo->adapter;
2407 struct be_rx_obj *rxo;
2408 int i, work = 0;
2409
2410 if (!be_lock_busy_poll(eqo))
2411 return LL_FLUSH_BUSY;
2412
2413 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2414 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2415 if (work)
2416 break;
2417 }
2418
2419 be_unlock_busy_poll(eqo);
2420 return work;
2421}
2422#endif
2423
f67ef7ba 2424void be_detect_error(struct be_adapter *adapter)
7c185276 2425{
e1cfb67a
PR
2426 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2427 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2428 u32 i;
2429
d23e946c 2430 if (be_hw_error(adapter))
72f02485
SP
2431 return;
2432
e1cfb67a
PR
2433 if (lancer_chip(adapter)) {
2434 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2435 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2436 sliport_err1 = ioread32(adapter->db +
2437 SLIPORT_ERROR1_OFFSET);
2438 sliport_err2 = ioread32(adapter->db +
2439 SLIPORT_ERROR2_OFFSET);
2440 }
2441 } else {
2442 pci_read_config_dword(adapter->pdev,
2443 PCICFG_UE_STATUS_LOW, &ue_lo);
2444 pci_read_config_dword(adapter->pdev,
2445 PCICFG_UE_STATUS_HIGH, &ue_hi);
2446 pci_read_config_dword(adapter->pdev,
2447 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2448 pci_read_config_dword(adapter->pdev,
2449 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2450
f67ef7ba
PR
2451 ue_lo = (ue_lo & ~ue_lo_mask);
2452 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2453 }
7c185276 2454
1451ae6e
AK
 2455 /* On certain platforms BE hardware can indicate spurious UEs.
 2456 * A real UE will anyway make the h/w stop working completely, so
 2457 * hw_error is not set purely on UE detection.
2458 */
2459 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2460 adapter->hw_error = true;
4bebb56a
SK
 2461 /* Do not log error messages if it's a FW reset */
2462 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2463 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2464 dev_info(&adapter->pdev->dev,
2465 "Firmware update in progress\n");
2466 return;
2467 } else {
2468 dev_err(&adapter->pdev->dev,
2469 "Error detected in the card\n");
2470 }
f67ef7ba
PR
2471 }
2472
2473 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2474 dev_err(&adapter->pdev->dev,
2475 "ERR: sliport status 0x%x\n", sliport_status);
2476 dev_err(&adapter->pdev->dev,
2477 "ERR: sliport error1 0x%x\n", sliport_err1);
2478 dev_err(&adapter->pdev->dev,
2479 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2480 }
2481
e1cfb67a
PR
2482 if (ue_lo) {
2483 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2484 if (ue_lo & 1)
7c185276
AK
2485 dev_err(&adapter->pdev->dev,
2486 "UE: %s bit set\n", ue_status_low_desc[i]);
2487 }
2488 }
f67ef7ba 2489
e1cfb67a
PR
2490 if (ue_hi) {
2491 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2492 if (ue_hi & 1)
7c185276
AK
2493 dev_err(&adapter->pdev->dev,
2494 "UE: %s bit set\n", ue_status_hi_desc[i]);
2495 }
2496 }
2497
2498}
2499
8d56ff11
SP
2500static void be_msix_disable(struct be_adapter *adapter)
2501{
ac6a0c4a 2502 if (msix_enabled(adapter)) {
8d56ff11 2503 pci_disable_msix(adapter->pdev);
ac6a0c4a 2504 adapter->num_msix_vec = 0;
68d7bdcb 2505 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2506 }
2507}
2508
c2bba3df 2509static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2510{
92bf14ab 2511 int i, status, num_vec;
d379142b 2512 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2513
92bf14ab
SP
2514 /* If RoCE is supported, program the max number of NIC vectors that
2515 * may be configured via set-channels, along with vectors needed for
 2516 * RoCE. Else, just program the number we'll use initially.
2517 */
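/* (e.g. on a 16-CPU host with be_max_eqs() = 8 and RoCE supported,
 * num_vec = min(16, 32) = 16, half of which may later be handed to
 * RoCE in the 'done' path below)
 */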
2518 if (be_roce_supported(adapter))
2519 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2520 2 * num_online_cpus());
2521 else
2522 num_vec = adapter->cfg_num_qs;
3abcdeda 2523
ac6a0c4a 2524 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2525 adapter->msix_entries[i].entry = i;
2526
ac6a0c4a 2527 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2528 if (status == 0) {
2529 goto done;
92bf14ab 2530 } else if (status >= MIN_MSIX_VECTORS) {
ac6a0c4a 2531 num_vec = status;
c2bba3df
SK
2532 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2533 num_vec);
2534 if (!status)
3abcdeda 2535 goto done;
3abcdeda 2536 }
d379142b
SP
2537
2538 dev_warn(dev, "MSIx enable failed\n");
92bf14ab 2539
c2bba3df
SK
2540 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2541 if (!be_physfn(adapter))
2542 return status;
2543 return 0;
3abcdeda 2544done:
92bf14ab
SP
2545 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2546 adapter->num_msix_roce_vec = num_vec / 2;
2547 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2548 adapter->num_msix_roce_vec);
2549 }
2550
2551 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2552
2553 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2554 adapter->num_msix_vec);
c2bba3df 2555 return 0;
6b7c5b94
SP
2556}
2557
fe6d2a38 2558static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2559 struct be_eq_obj *eqo)
b628bde2 2560{
f2f781a7 2561 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2562}
6b7c5b94 2563
b628bde2
SP
2564static int be_msix_register(struct be_adapter *adapter)
2565{
10ef9ab4
SP
2566 struct net_device *netdev = adapter->netdev;
2567 struct be_eq_obj *eqo;
2568 int status, i, vec;
6b7c5b94 2569
10ef9ab4
SP
2570 for_all_evt_queues(adapter, eqo, i) {
2571 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2572 vec = be_msix_vec_get(adapter, eqo);
2573 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2574 if (status)
2575 goto err_msix;
2576 }
b628bde2 2577
6b7c5b94 2578 return 0;
3abcdeda 2579err_msix:
10ef9ab4
SP
2580 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2581 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2582 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2583 status);
ac6a0c4a 2584 be_msix_disable(adapter);
6b7c5b94
SP
2585 return status;
2586}
2587
2588static int be_irq_register(struct be_adapter *adapter)
2589{
2590 struct net_device *netdev = adapter->netdev;
2591 int status;
2592
ac6a0c4a 2593 if (msix_enabled(adapter)) {
6b7c5b94
SP
2594 status = be_msix_register(adapter);
2595 if (status == 0)
2596 goto done;
ba343c77
SB
2597 /* INTx is not supported for VF */
2598 if (!be_physfn(adapter))
2599 return status;
6b7c5b94
SP
2600 }
2601
e49cc34f 2602 /* INTx: only the first EQ is used */
6b7c5b94
SP
2603 netdev->irq = adapter->pdev->irq;
2604 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2605 &adapter->eq_obj[0]);
6b7c5b94
SP
2606 if (status) {
2607 dev_err(&adapter->pdev->dev,
2608 "INTx request IRQ failed - err %d\n", status);
2609 return status;
2610 }
2611done:
2612 adapter->isr_registered = true;
2613 return 0;
2614}
2615
2616static void be_irq_unregister(struct be_adapter *adapter)
2617{
2618 struct net_device *netdev = adapter->netdev;
10ef9ab4 2619 struct be_eq_obj *eqo;
3abcdeda 2620 int i;
6b7c5b94
SP
2621
2622 if (!adapter->isr_registered)
2623 return;
2624
2625 /* INTx */
ac6a0c4a 2626 if (!msix_enabled(adapter)) {
e49cc34f 2627 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2628 goto done;
2629 }
2630
2631 /* MSIx */
10ef9ab4
SP
2632 for_all_evt_queues(adapter, eqo, i)
2633 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2634
6b7c5b94
SP
2635done:
2636 adapter->isr_registered = false;
6b7c5b94
SP
2637}
2638
10ef9ab4 2639static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2640{
2641 struct be_queue_info *q;
2642 struct be_rx_obj *rxo;
2643 int i;
2644
2645 for_all_rx_queues(adapter, rxo, i) {
2646 q = &rxo->q;
2647 if (q->created) {
2648 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2649 be_rx_cq_clean(rxo);
482c9e79 2650 }
10ef9ab4 2651 be_queue_free(adapter, q);
482c9e79
SP
2652 }
2653}
2654
889cd4b2
SP
2655static int be_close(struct net_device *netdev)
2656{
2657 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2658 struct be_eq_obj *eqo;
2659 int i;
889cd4b2 2660
045508a8
PP
2661 be_roce_dev_close(adapter);
2662
dff345c5
IV
2663 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2664 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2665 napi_disable(&eqo->napi);
6384a4d0
SP
2666 be_disable_busy_poll(eqo);
2667 }
71237b6f 2668 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2669 }
a323d9bf
SP
2670
2671 be_async_mcc_disable(adapter);
2672
2673 /* Wait for all pending tx completions to arrive so that
2674 * all tx skbs are freed.
2675 */
fba87559 2676 netif_tx_disable(netdev);
6e1f9975 2677 be_tx_compl_clean(adapter);
a323d9bf
SP
2678
2679 be_rx_qs_destroy(adapter);
2680
d11a347d
AK
2681 for (i = 1; i < (adapter->uc_macs + 1); i++)
2682 be_cmd_pmac_del(adapter, adapter->if_handle,
2683 adapter->pmac_id[i], 0);
2684 adapter->uc_macs = 0;
2685
a323d9bf 2686 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2687 if (msix_enabled(adapter))
2688 synchronize_irq(be_msix_vec_get(adapter, eqo));
2689 else
2690 synchronize_irq(netdev->irq);
2691 be_eq_clean(eqo);
63fcb27f
PR
2692 }
2693
889cd4b2
SP
2694 be_irq_unregister(adapter);
2695
482c9e79
SP
2696 return 0;
2697}
2698
10ef9ab4 2699static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2700{
2701 struct be_rx_obj *rxo;
e9008ee9
PR
2702 int rc, i, j;
2703 u8 rsstable[128];
482c9e79
SP
2704
2705 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2706 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2707 sizeof(struct be_eth_rx_d));
2708 if (rc)
2709 return rc;
2710 }
2711
2712 /* The FW would like the default RXQ to be created first */
2713 rxo = default_rxo(adapter);
2714 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2715 adapter->if_handle, false, &rxo->rss_id);
2716 if (rc)
2717 return rc;
2718
2719 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2720 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2721 rx_frag_size, adapter->if_handle,
2722 true, &rxo->rss_id);
482c9e79
SP
2723 if (rc)
2724 return rc;
2725 }
2726
2727 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2728 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2729 for_all_rss_queues(adapter, rxo, i) {
2730 if ((j + i) >= 128)
2731 break;
2732 rsstable[j + i] = rxo->rss_id;
2733 }
2734 }
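/* e.g. with 3 RSS rings the 128-entry indirection table is filled
 * round-robin: rss_id0, rss_id1, rss_id2, rss_id0, ...
 */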
594ad54a
SR
2735 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2736 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2737
2738 if (!BEx_chip(adapter))
2739 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2740 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2741 } else {
 2742 /* Disable RSS if only the default RXQ is created */
2743 adapter->rss_flags = RSS_ENABLE_NONE;
2744 }
594ad54a 2745
da1388d6
VV
2746 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2747 128);
2748 if (rc) {
2749 adapter->rss_flags = RSS_ENABLE_NONE;
2750 return rc;
482c9e79
SP
2751 }
2752
2753 /* First time posting */
10ef9ab4 2754 for_all_rx_queues(adapter, rxo, i)
482c9e79 2755 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2756 return 0;
2757}
2758
6b7c5b94
SP
2759static int be_open(struct net_device *netdev)
2760{
2761 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2762 struct be_eq_obj *eqo;
3abcdeda 2763 struct be_rx_obj *rxo;
10ef9ab4 2764 struct be_tx_obj *txo;
b236916a 2765 u8 link_status;
3abcdeda 2766 int status, i;
5fb379ee 2767
10ef9ab4 2768 status = be_rx_qs_create(adapter);
482c9e79
SP
2769 if (status)
2770 goto err;
2771
c2bba3df
SK
2772 status = be_irq_register(adapter);
2773 if (status)
2774 goto err;
5fb379ee 2775
10ef9ab4 2776 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2777 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2778
10ef9ab4
SP
2779 for_all_tx_queues(adapter, txo, i)
2780 be_cq_notify(adapter, txo->cq.id, true, 0);
2781
7a1e9b20
SP
2782 be_async_mcc_enable(adapter);
2783
10ef9ab4
SP
2784 for_all_evt_queues(adapter, eqo, i) {
2785 napi_enable(&eqo->napi);
6384a4d0 2786 be_enable_busy_poll(eqo);
10ef9ab4
SP
2787 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2788 }
04d3d624 2789 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2790
323ff71e 2791 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2792 if (!status)
2793 be_link_status_update(adapter, link_status);
2794
fba87559 2795 netif_tx_start_all_queues(netdev);
045508a8 2796 be_roce_dev_open(adapter);
889cd4b2
SP
2797 return 0;
2798err:
2799 be_close(adapter->netdev);
2800 return -EIO;
5fb379ee
SP
2801}
2802
71d8d1b5
AK
2803static int be_setup_wol(struct be_adapter *adapter, bool enable)
2804{
2805 struct be_dma_mem cmd;
2806 int status = 0;
2807 u8 mac[ETH_ALEN];
2808
2809 memset(mac, 0, ETH_ALEN);
2810
2811 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2812 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2813 GFP_KERNEL);
71d8d1b5
AK
2814 if (cmd.va == NULL)
2815 return -1;
71d8d1b5
AK
2816
2817 if (enable) {
2818 status = pci_write_config_dword(adapter->pdev,
2819 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2820 if (status) {
2821 dev_err(&adapter->pdev->dev,
2381a55c 2822 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2823 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2824 cmd.dma);
71d8d1b5
AK
2825 return status;
2826 }
2827 status = be_cmd_enable_magic_wol(adapter,
2828 adapter->netdev->dev_addr, &cmd);
2829 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2830 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2831 } else {
2832 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2833 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2834 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2835 }
2836
2b7bcebf 2837 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2838 return status;
2839}
2840
6d87f5c3
AK
2841/*
2842 * Generate a seed MAC address from the PF MAC Address using jhash.
 2843 * MAC addresses for VFs are assigned incrementally starting from the seed.
2844 * These addresses are programmed in the ASIC by the PF and the VF driver
2845 * queries for the MAC address during its probe.
2846 */
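/* e.g. if the hashed seed ends in ...:10, VF0 is assigned ...:10,
 * VF1 ...:11, and so on; only the last octet (mac[5]) is incremented.
 */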
4c876616 2847static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2848{
f9449ab7 2849 u32 vf;
3abcdeda 2850 int status = 0;
6d87f5c3 2851 u8 mac[ETH_ALEN];
11ac75ed 2852 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2853
2854 be_vf_eth_addr_generate(adapter, mac);
2855
11ac75ed 2856 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2857 if (BEx_chip(adapter))
590c391d 2858 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2859 vf_cfg->if_handle,
2860 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2861 else
2862 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2863 vf + 1);
590c391d 2864
6d87f5c3
AK
2865 if (status)
2866 dev_err(&adapter->pdev->dev,
590c391d 2867 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2868 else
11ac75ed 2869 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2870
2871 mac[5] += 1;
2872 }
2873 return status;
2874}
2875
4c876616
SP
2876static int be_vfs_mac_query(struct be_adapter *adapter)
2877{
2878 int status, vf;
2879 u8 mac[ETH_ALEN];
2880 struct be_vf_cfg *vf_cfg;
4c876616
SP
2881
2882 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
2883 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2884 mac, vf_cfg->if_handle,
2885 false, vf+1);
4c876616
SP
2886 if (status)
2887 return status;
2888 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2889 }
2890 return 0;
2891}
2892
f9449ab7 2893static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2894{
11ac75ed 2895 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2896 u32 vf;
2897
257a3feb 2898 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
2899 dev_warn(&adapter->pdev->dev,
2900 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2901 goto done;
2902 }
2903
b4c1df93
SP
2904 pci_disable_sriov(adapter->pdev);
2905
11ac75ed 2906 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2907 if (BEx_chip(adapter))
11ac75ed
SP
2908 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2909 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2910 else
2911 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2912 vf + 1);
f9449ab7 2913
11ac75ed
SP
2914 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2915 }
39f1d94d
SP
2916done:
2917 kfree(adapter->vf_cfg);
2918 adapter->num_vfs = 0;
6d87f5c3
AK
2919}
2920
7707133c
SP
2921static void be_clear_queues(struct be_adapter *adapter)
2922{
2923 be_mcc_queues_destroy(adapter);
2924 be_rx_cqs_destroy(adapter);
2925 be_tx_queues_destroy(adapter);
2926 be_evt_queues_destroy(adapter);
2927}
2928
68d7bdcb 2929static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 2930{
191eb756
SP
2931 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2932 cancel_delayed_work_sync(&adapter->work);
2933 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2934 }
68d7bdcb
SP
2935}
2936
b05004ad 2937static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
2938{
2939 int i;
2940
b05004ad
SK
2941 if (adapter->pmac_id) {
2942 for (i = 0; i < (adapter->uc_macs + 1); i++)
2943 be_cmd_pmac_del(adapter, adapter->if_handle,
2944 adapter->pmac_id[i], 0);
2945 adapter->uc_macs = 0;
2946
2947 kfree(adapter->pmac_id);
2948 adapter->pmac_id = NULL;
2949 }
2950}
2951
2952static int be_clear(struct be_adapter *adapter)
2953{
68d7bdcb 2954 be_cancel_worker(adapter);
191eb756 2955
11ac75ed 2956 if (sriov_enabled(adapter))
f9449ab7
SP
2957 be_vf_clear(adapter);
2958
2d17f403 2959 /* delete the primary mac along with the uc-mac list */
b05004ad 2960 be_mac_clear(adapter);
fbc13f01 2961
f9449ab7 2962 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 2963
7707133c 2964 be_clear_queues(adapter);
a54769f5 2965
10ef9ab4 2966 be_msix_disable(adapter);
a54769f5
SP
2967 return 0;
2968}
2969
4c876616 2970static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2971{
92bf14ab 2972 struct be_resources res = {0};
4c876616
SP
2973 struct be_vf_cfg *vf_cfg;
2974 u32 cap_flags, en_flags, vf;
922bbe88 2975 int status = 0;
abb93951 2976
4c876616
SP
2977 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2978 BE_IF_FLAGS_MULTICAST;
abb93951 2979
4c876616 2980 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
2981 if (!BE3_chip(adapter)) {
2982 status = be_cmd_get_profile_config(adapter, &res,
2983 vf + 1);
2984 if (!status)
2985 cap_flags = res.if_cap_flags;
2986 }
4c876616
SP
2987
2988 /* If a FW profile exists, then cap_flags are updated */
2989 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2990 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2991 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2992 &vf_cfg->if_handle, vf + 1);
2993 if (status)
2994 goto err;
2995 }
2996err:
2997 return status;
abb93951
PR
2998}
2999
39f1d94d 3000static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3001{
11ac75ed 3002 struct be_vf_cfg *vf_cfg;
30128031
SP
3003 int vf;
3004
39f1d94d
SP
3005 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3006 GFP_KERNEL);
3007 if (!adapter->vf_cfg)
3008 return -ENOMEM;
3009
11ac75ed
SP
3010 for_all_vfs(adapter, vf_cfg, vf) {
3011 vf_cfg->if_handle = -1;
3012 vf_cfg->pmac_id = -1;
30128031 3013 }
39f1d94d 3014 return 0;
30128031
SP
3015}
3016
f9449ab7
SP
3017static int be_vf_setup(struct be_adapter *adapter)
3018{
c502224e 3019 struct device *dev = &adapter->pdev->dev;
11ac75ed 3020 struct be_vf_cfg *vf_cfg;
4c876616 3021 int status, old_vfs, vf;
04a06028 3022 u32 privileges;
c502224e 3023 u16 lnk_speed;
39f1d94d 3024
257a3feb 3025 old_vfs = pci_num_vf(adapter->pdev);
4c876616
SP
3026 if (old_vfs) {
3027 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3028 if (old_vfs != num_vfs)
3029 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3030 adapter->num_vfs = old_vfs;
39f1d94d 3031 } else {
92bf14ab 3032 if (num_vfs > be_max_vfs(adapter))
4c876616 3033 dev_info(dev, "Device supports %d VFs and not %d\n",
92bf14ab
SP
3034 be_max_vfs(adapter), num_vfs);
3035 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
b4c1df93 3036 if (!adapter->num_vfs)
4c876616 3037 return 0;
39f1d94d
SP
3038 }
3039
3040 status = be_vf_setup_init(adapter);
3041 if (status)
3042 goto err;
30128031 3043
4c876616
SP
3044 if (old_vfs) {
3045 for_all_vfs(adapter, vf_cfg, vf) {
3046 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3047 if (status)
3048 goto err;
3049 }
3050 } else {
3051 status = be_vfs_if_create(adapter);
f9449ab7
SP
3052 if (status)
3053 goto err;
f9449ab7
SP
3054 }
3055
4c876616
SP
3056 if (old_vfs) {
3057 status = be_vfs_mac_query(adapter);
3058 if (status)
3059 goto err;
3060 } else {
39f1d94d
SP
3061 status = be_vf_eth_addr_config(adapter);
3062 if (status)
3063 goto err;
3064 }
f9449ab7 3065
11ac75ed 3066 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
 3067 /* Allow VFs to program MAC/VLAN filters */
3068 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3069 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3070 status = be_cmd_set_fn_privileges(adapter,
3071 privileges |
3072 BE_PRIV_FILTMGMT,
3073 vf + 1);
3074 if (!status)
3075 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3076 vf);
3077 }
3078
4c876616
SP
 3079 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
 3080 * Allow the full available bandwidth
3081 */
3082 if (BE3_chip(adapter) && !old_vfs)
3083 be_cmd_set_qos(adapter, 1000, vf+1);
3084
3085 status = be_cmd_link_status_query(adapter, &lnk_speed,
3086 NULL, vf + 1);
3087 if (!status)
3088 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b 3089
0599863d
VV
3090 if (!old_vfs)
3091 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 3092 }
b4c1df93
SP
3093
3094 if (!old_vfs) {
3095 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3096 if (status) {
3097 dev_err(dev, "SRIOV enable failed\n");
3098 adapter->num_vfs = 0;
3099 goto err;
3100 }
3101 }
f9449ab7
SP
3102 return 0;
3103err:
4c876616
SP
3104 dev_err(dev, "VF setup failed\n");
3105 be_vf_clear(adapter);
f9449ab7
SP
3106 return status;
3107}
3108
92bf14ab
SP
3109/* On BE2/BE3 the FW does not report the supported resource limits */
3110static void BEx_get_resources(struct be_adapter *adapter,
3111 struct be_resources *res)
3112{
3113 struct pci_dev *pdev = adapter->pdev;
3114 bool use_sriov = false;
e3dc867c 3115 int max_vfs;
92bf14ab 3116
e3dc867c 3117 max_vfs = pci_sriov_get_totalvfs(pdev);
92bf14ab 3118
e3dc867c 3119 if (BE3_chip(adapter) && sriov_want(adapter)) {
92bf14ab 3120 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
b905b5d4 3121 use_sriov = res->max_vfs;
92bf14ab
SP
3122 }
3123
3124 if (be_physfn(adapter))
3125 res->max_uc_mac = BE_UC_PMAC_COUNT;
3126 else
3127 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3128
3129 if (adapter->function_mode & FLEX10_MODE)
3130 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
1aa9673c
AK
3131 else if (adapter->function_mode & UMC_ENABLED)
3132 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
92bf14ab
SP
3133 else
3134 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3135 res->max_mcast_mac = BE_MAX_MC;
3136
30f3fe45 3137 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
92bf14ab 3138 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
30f3fe45 3139 !be_physfn(adapter) || (adapter->port_num > 1))
92bf14ab
SP
3140 res->max_tx_qs = 1;
3141 else
3142 res->max_tx_qs = BE3_MAX_TX_QS;
3143
3144 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3145 !use_sriov && be_physfn(adapter))
3146 res->max_rss_qs = (adapter->be3_native) ?
3147 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3148 res->max_rx_qs = res->max_rss_qs + 1;
3149
e3dc867c
SR
3150 if (be_physfn(adapter))
3151 res->max_evt_qs = (max_vfs > 0) ?
3152 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3153 else
3154 res->max_evt_qs = 1;
92bf14ab
SP
3155
3156 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3157 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3158 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3159}
3160
30128031
SP
3161static void be_setup_init(struct be_adapter *adapter)
3162{
3163 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3164 adapter->phy.link_speed = -1;
30128031
SP
3165 adapter->if_handle = -1;
3166 adapter->be3_native = false;
3167 adapter->promiscuous = false;
f25b119c
PR
3168 if (be_physfn(adapter))
3169 adapter->cmd_privileges = MAX_PRIVILEGES;
3170 else
3171 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3172}
3173
92bf14ab 3174static int be_get_resources(struct be_adapter *adapter)
abb93951 3175{
92bf14ab
SP
3176 struct device *dev = &adapter->pdev->dev;
3177 struct be_resources res = {0};
3178 int status;
abb93951 3179
92bf14ab
SP
3180 if (BEx_chip(adapter)) {
3181 BEx_get_resources(adapter, &res);
3182 adapter->res = res;
abb93951
PR
3183 }
3184
92bf14ab
SP
3185 /* For Lancer, SH etc read per-function resource limits from FW.
3186 * GET_FUNC_CONFIG returns per function guaranteed limits.
 3187 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
3188 */
3189 if (!BEx_chip(adapter)) {
3190 status = be_cmd_get_func_config(adapter, &res);
3191 if (status)
3192 return status;
abb93951 3193
92bf14ab
SP
 3194 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3195 if (be_roce_supported(adapter))
3196 res.max_evt_qs /= 2;
3197 adapter->res = res;
abb93951 3198
92bf14ab
SP
3199 if (be_physfn(adapter)) {
3200 status = be_cmd_get_profile_config(adapter, &res, 0);
3201 if (status)
3202 return status;
3203 adapter->res.max_vfs = res.max_vfs;
3204 }
abb93951 3205
92bf14ab
SP
3206 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3207 be_max_txqs(adapter), be_max_rxqs(adapter),
3208 be_max_rss(adapter), be_max_eqs(adapter),
3209 be_max_vfs(adapter));
3210 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3211 be_max_uc(adapter), be_max_mc(adapter),
3212 be_max_vlans(adapter));
abb93951 3213 }
4c876616 3214
92bf14ab 3215 return 0;
abb93951
PR
3216}
3217
39f1d94d
SP
3218/* Routine to query per function resource limits */
3219static int be_get_config(struct be_adapter *adapter)
3220{
542963b7 3221 u16 profile_id;
4c876616 3222 int status;
39f1d94d 3223
abb93951
PR
3224 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3225 &adapter->function_mode,
0ad3157e
VV
3226 &adapter->function_caps,
3227 &adapter->asic_rev);
abb93951 3228 if (status)
92bf14ab 3229 return status;
abb93951 3230
542963b7
VV
3231 if (be_physfn(adapter)) {
3232 status = be_cmd_get_active_profile(adapter, &profile_id);
3233 if (!status)
3234 dev_info(&adapter->pdev->dev,
3235 "Using profile 0x%x\n", profile_id);
3236 }
3237
92bf14ab
SP
3238 status = be_get_resources(adapter);
3239 if (status)
3240 return status;
abb93951
PR
3241
3242 /* primary mac needs 1 pmac entry */
92bf14ab
SP
3243 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3244 GFP_KERNEL);
3245 if (!adapter->pmac_id)
3246 return -ENOMEM;
abb93951 3247
92bf14ab
SP
3248 /* Sanitize cfg_num_qs based on HW and platform limits */
3249 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3250
3251 return 0;
39f1d94d
SP
3252}
3253
95046b92
SP
3254static int be_mac_setup(struct be_adapter *adapter)
3255{
3256 u8 mac[ETH_ALEN];
3257 int status;
3258
3259 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3260 status = be_cmd_get_perm_mac(adapter, mac);
3261 if (status)
3262 return status;
3263
3264 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3265 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3266 } else {
3267 /* Maybe the HW was reset; dev_addr must be re-programmed */
3268 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3269 }
3270
2c7a9dc1
AK
3271 /* For BE3-R VFs, the PF programs the initial MAC address */
3272 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3273 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3274 &adapter->pmac_id[0], 0);
95046b92
SP
3275 return 0;
3276}
3277
68d7bdcb
SP
3278static void be_schedule_worker(struct be_adapter *adapter)
3279{
3280 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3281 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3282}
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
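
/* Tears down and re-creates all queues (e.g. when the channel count is
 * changed at runtime). The MSI-X table can only be re-programmed when no
 * vectors are shared with RoCE, hence the num_msix_roce_vec check below.
 */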
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
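
/* Main bring-up path, called from probe, resume and error recovery:
 * query resources, enable MSI-X, create the interface and its queues,
 * program the MAC, then apply VLAN/RX-mode/flow-control settings and
 * kick off the worker.
 */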
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
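
/* UFI firmware-image flashing support. As the helpers below assume, a UFI
 * file starts with a file header and a set of per-image headers, followed
 * by a flash section directory (located by its flash_cookie signature)
 * that describes each flashable component.
 */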
#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
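
/* Writes one component to flash in 32KB chunks: every chunk but the last
 * is sent with a SAVE op, and the final chunk with a FLASH op that
 * triggers the actual write of the accumulated data.
 */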
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   pflashcomp[i].offset,
						   pflashcomp[i].size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}
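
/* Lancer uses a different download model from BE2/BE3/Skyhawk: the image
 * is streamed to the "/prg" object in 32KB write_object chunks, followed
 * by a zero-length write that commits it; the adapter (or the whole
 * system) may then need a reset for the new firmware to take effect.
 */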
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. Status code: 0x%x Additional status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
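
/* Each UFI file targets one ASIC generation; the type is derived from the
 * "build" byte in the file header and matched against the chip, with
 * BE3-R (asic rev 0x10) singled out so that plain BE3 images are not
 * flashed onto it.
 */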
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
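
/* Entry point for user-initiated flashing (typically reached via
 * ethtool's flash-device path); loads the image file through the
 * firmware loader and dispatches to the Lancer or UFI download routine.
 */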
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
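
/* ndo_bridge_{set,get}link handlers: with SR-IOV enabled the embedded
 * switch can either forward VF-to-VF traffic internally (VEB) or hairpin
 * it through the external switch (VEPA).
 */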
static int be_ndo_bridge_setlink(struct net_device *dev,
				 struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	/* nlmsg_find_attr() returns NULL if IFLA_AF_SPEC is absent */
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}
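
/* Callbacks wired into the stack for every be2net netdev; be_netdev_init()
 * below attaches this table along with the ethtool ops and feature flags.
 */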
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
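
/* The mailbox used for bootstrap commands is kept 16-byte aligned: the
 * backing buffer is allocated with 16 bytes of slack and mbox_mem is
 * aligned within it via PTR_ALIGN() below.
 */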
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
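
/* Lancer error recovery: once the chip reports ready again the function
 * is torn down (be_clear) and brought back up (be_setup); -EAGAIN from
 * setup is treated as "resources still provisioning", and the recovery
 * task below keeps retrying in that case.
 */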
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
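
/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh stats and die temperature, replenish RX queues
 * starved by allocation failures, and update EQ delays.
 */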
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}
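
/* PCI probe: map BARs and set up mailboxes (be_ctrl_init), sync with
 * firmware, optionally FLR the function, then run be_setup() and register
 * the netdev. The unwind labels at the bottom release resources in
 * reverse order of acquisition.
 */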
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
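
/* EEH (PCI error) hooks: error_detected quiesces and clears the function,
 * slot_reset re-enables the device and waits for firmware readiness, and
 * resume re-runs the setup path and re-attaches the netdev.
 */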
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
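
/* Module init validates rx_frag_size before registering the PCI driver;
 * e.g. (assuming the usual be2net module name) the driver would be loaded
 * with non-default parameters as:
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */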
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);