/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
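
/* Note: mem->size is simply len * entry_size; e.g. a 256-entry queue of
 * 16-byte descriptors occupies one 4KB coherent DMA buffer. be_queue_free()
 * clears mem->va after freeing, so calling it twice (or on a queue that was
 * never allocated) is a harmless no-op.
 */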

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
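
/* Worked example: if *acc == 0x0001fffe (hi word 0x0001, lo word 0xfffe)
 * and the HW now reports val == 0x0003, then val < lo(*acc) flags a wrap;
 * newacc = 0x00010000 + 0x0003 + 65536 = 0x00020003, so the 32-bit
 * accumulator keeps counting across the 16-bit HW counter wrap.
 */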

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo,
			       u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* this erx HW counter wraps around after 65535; the
		 * driver accumulates it into a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
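
/* Example: an skb with a non-empty linear area and 3 page frags needs
 * 1 + 3 data WRBs plus the hdr WRB, i.e. cnt = 5; on BE2/BE3 (but not
 * Lancer) an odd count gets a dummy WRB appended to make it even, so 6
 * TX queue entries are consumed for this packet.
 */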

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
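
/* The 64-bit DMA address is split across two 32-bit WRB fields: e.g. the
 * address 0x0000001234567890 is stored as frag_pa_hi = 0x12 and
 * frag_pa_lo = 0x34567890. unmap_tx_frag() below recombines the two
 * halves with the inverse shift-or.
 */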

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
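
/* Example: a tag of 0xa064 (PCP 5, VID 100) whose priority bit 5 is clear
 * in adapter->vlan_prio_bmap has its PCP bits replaced while the VID bits
 * are preserved. Note that adapter->recommended_prio is OR'd in without a
 * shift, so it is expected to already hold the priority in VLAN_PRIO
 * position.
 */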

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
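
/* On a DMA mapping failure, the queue head is rewound to map_head (just
 * past the reserved hdr WRB) and every WRB filled so far is unmapped in
 * order. Only the first WRB can carry a dma_map_single() mapping (the skb
 * linear area), which is why map_single is cleared after the first unwind
 * iteration.
 */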

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
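
/* struct ipv6_opt_hdr is { nexthdr, hdrlen }, so ehdr->hdrlen is the second
 * byte after the fixed IPv6 header; a value of 0xff identifies the packets
 * that can trigger the BE3 TX stall handled via be_ipv6_tx_stall_chk()
 * below.
 */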

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
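
/* Range illustration (assuming the usual be.h values BE_MIN_MTU == 256 and
 * BE_MAX_JUMBO_FRAME_SIZE == 9018): with ETH_HLEN == 14 and
 * ETH_FCS_LEN == 4 the accepted MTU range is 256..9000 bytes, i.e. the
 * jumbo frame size minus the Ethernet header and FCS.
 */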

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= be_max_vlans(adapter))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle, 0);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					adapter->vf_cfg[vf].if_handle, 0);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}
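
/* Adaptive coalescing example: at an observed rate of 220000 pkts/sec,
 * eqd = (220000 / 110000) << 3 = 16. The result is clamped to
 * [min_eqd, max_eqd], and rates low enough to yield eqd < 10 disable the
 * interrupt delay entirely (eqd = 0).
 */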

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}
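
/* In other words: CHECKSUM_UNNECESSARY is reported only for TCP/UDP frames
 * whose L4 checksum passed and, for IPv4, whose IP header checksum also
 * passed; IPv6 has no IP header checksum, hence the rxcp->ipv6 term.
 */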

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
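
/* Coalescing example: with the default rx_frag_size of 2048 and (assuming)
 * a 4KB big_page_size, two consecutive RX fragments come from the same
 * physical page; the second one has a non-zero page_offset, so its length
 * is folded into the current frags[] slot and its extra page reference is
 * dropped rather than consuming a new slot.
 */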

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * Ignore it if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
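
/* A note on the completion-ring pattern used above (and by
 * be_tx_compl_get() and events_get() below): the driver peeks at the
 * ring's tail entry, treats a zero 'valid' dword as "no new entry",
 * and only reads the rest of the entry after an rmb(). The barrier
 * orders the valid-bit check before the reads of the DMA'd payload;
 * clearing the valid dword afterwards lets the same slot be detected
 * as new again once the ring wraps around.
 */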

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
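
/* __GFP_COMP is requested above because the RX path takes per-fragment
 * page references with get_page() and drops them with put_page(); for
 * order > 0 allocations that refcounting is only safe on compound
 * pages, where all subpages share the head page's reference count.
 */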

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
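
/* Example of the page splitting above with the default rx_frag_size of
 * 2048 and a 4K big_page_size: one page yields two fragments at
 * offsets 0 and 2048. The "any space left" check passes at offset 0
 * (0 + 2048 + 2048 is not > 4096) but fails at offset 2048, so that
 * second fragment is marked last_page_user and the next iteration
 * allocates a fresh page.
 */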

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
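
/* The drain above is two-phase: first poll every TX CQ for up to
 * 200 ms (200 iterations of mdelay(1)) so in-flight completions can
 * arrive; then, for any WRBs still outstanding, walk the TX ring
 * directly and free the posted skbs whose completions will never
 * arrive, for instance when the HW has stopped responding.
 */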

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
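
/* Since num_tx_qs is clamped to num_evt_qs above, each TXQ currently
 * gets its own EQ and the i % num_evt_qs expression is effectively an
 * identity mapping; it is written as a modulo so that TXQs would still
 * be distributed round-robin over the EQs if more TXQs than EQs were
 * ever configured. be_poll() walks the matching stride
 * (i += num_evt_qs) when servicing TX completions for its EQ.
 */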

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
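
/* Queue-count example: with 4 EQs, num_rx_qs is first set to 4 (all
 * RSS-capable rings) and then bumped to 5 so that one extra ring can
 * serve as the default RXQ for non-IP traffic; the dev_info() above
 * therefore reports num_rx_qs - 1 RSS queues plus 1 default queue.
 */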

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* An IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when the EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed.
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
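
/* NAPI budget accounting above: an unfinished TX CQ forces max_work to
 * the full budget so the kernel keeps polling, while RX work is taken
 * as the max across this EQ's rings. Only when max_work < budget is
 * the EQ re-armed (arm=true); otherwise the events are just counted
 * and cleared, since the poll handler is guaranteed to run again.
 */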

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}
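
/* The UE mask registers read above flag bits that are expected or
 * ignorable on this platform; masked-off bits are cleared from
 * ue_lo/ue_hi before the per-bit names from ue_status_low_desc[] and
 * ue_status_hi_desc[] are printed, so only unmasked UEs are reported.
 */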

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

static int be_msix_enable(struct be_adapter *adapter)
{
	int i, status, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCE. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= MIN_MSIX_VECTORS) {
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;
}
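
/* pci_enable_msix() here has the old kernel semantics: 0 on success,
 * a positive value giving the number of vectors the platform could
 * actually provide (in which case the request is retried above with
 * that reduced count), or a negative errno. The RoCE/NIC split of the
 * granted vectors happens under the 'done:' label.
 */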

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
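
/* RSS indirection-table example: with 3 RSS rings whose rss_ids are
 * A, B and C, the nested loops above fill the 128-entry table as
 * A B C A B C ... so hash values spray uniformly across the rings.
 * UDP hashing is requested only on chips newer than BEx.
 */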

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac,
						vf_cfg->if_handle, vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static int be_clear(struct be_adapter *adapter)
{
	int i;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf + 1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle, NULL);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;

	if (BE3_chip(adapter) && sriov_want(adapter)) {
		int max_vfs;

		max_vfs = pci_sriov_get_totalvfs(pdev);
		res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		use_sriov = res->max_vfs;
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	if (adapter->function_mode & FLEX10_MODE)
		res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
	else
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	res->max_mcast_mac = BE_MAX_MC;

	/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
	if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
	    !be_physfn(adapter) || (adapter->port_num > 1))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
				   GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
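
/* be_update_queues() supports run-time queue-count changes (the
 * set-channels path mentioned in be_msix_enable()): everything from
 * the EQs up is torn down and rebuilt with the new cfg_num_qs, but the
 * MSI-X table is left alone when vectors are shared with RoCE, since
 * re-programming it would yank vectors out from under the RoCE driver.
 */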

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3387
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					pflashcomp[i].offset,
					pflashcomp[i].size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

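/* Flash a Skyhawk (gen4) UFI: walk the flash section table, map each image
 * type to the corresponding flashrom op-type and flash it; entries with an
 * unrecognized type are skipped.
 */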
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

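/* Lancer FW download: stream the image to the "/prg" flash object in 32KB
 * chunks, then issue a zero-length write to commit it. Depending on the
 * returned change_status, the FW is either reset in place or a system
 * reboot is required for the new image to become active.
 */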
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

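/* A UFI file's compatibility with the adapter is determined from the file
 * header's build string (first char encodes the FW generation) and, for BE3,
 * the ASIC revision: BE3-R (asic_type_rev 0x10) needs a TYPE3R UFI.
 */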
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE3R		10
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

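/* Entry point for firmware flashing: fetch the image via request_firmware()
 * and dispatch to the Lancer or BEx/Skyhawk download path. The interface
 * must be up for flashing to be allowed.
 */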
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}

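/* Program the HW switch's port forwarding mode (VEB or VEPA) from an
 * RTM_SETLINK bridge request; only supported when SR-IOV is enabled.
 */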
static int be_ndo_bridge_setlink(struct net_device *dev,
				 struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev,
				 u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

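/* The doorbell region lives in BAR 0 on Lancer and on VFs, and in BAR 4 on
 * BEx/Skyhawk PFs.
 */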
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

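/* Re-initialize a Lancer function after an error: wait for the FW to become
 * ready again, then tear down and re-create all driver resources.
 */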
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Error recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Error recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

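/* Periodic (1s) housekeeping: reap MCC completions while interrupts are not
 * yet enabled, refresh stats and die temperature, replenish starved RX
 * queues and adapt EQ interrupt delays.
 */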
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

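/* EEH error handling: on error detection the netdev is detached and all
 * resources are torn down; a permanent failure disconnects the device,
 * otherwise a slot reset is requested and the function is re-created in
 * the resume callback.
 */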
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);