]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
bna: Staticize local functions
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
c7bb15a6 2 * Copyright (C) 2005 - 2013 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
6b7c5b94
SP
24
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 28MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
29MODULE_LICENSE("GPL");
30
ba343c77 31static unsigned int num_vfs;
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
ba343c77 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 34
11ac75ed
SP
35static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
/* PCI IDs this driver claims — presumably BE2/BE3 (ServerEngines vendor
 * ID) and the OneConnect family (Emulex vendor ID); confirm against be.h
 * device-ID definitions.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the low unrecoverable-error status register;
 * presumably indexed by bit position — confirm against the UE reporting
 * code that walks this table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names for the high unrecoverable-error status register; trailing
 * "Unknown" entries pad the table to 32 bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
6b7c5b94 121
752961a1
SP
122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
6b7c5b94
SP
129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 132 if (mem->va) {
2b7bcebf
IV
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
1cfafab9
SP
135 mem->va = NULL;
136 }
6b7c5b94
SP
137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140 u16 len, u16 entry_size)
141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
2b7bcebf 148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
1f9061d2 149 GFP_KERNEL | __GFP_ZERO);
6b7c5b94 150 if (!mem->va)
10ef9ab4 151 return -ENOMEM;
6b7c5b94
SP
152 return 0;
153}
154
68c45a2d 155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 156{
db3ea781 157 u32 reg, enabled;
5f0b849e 158
db3ea781
SP
159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 &reg);
161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
5f0b849e 163 if (!enabled && enable)
6b7c5b94 164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 165 else if (enabled && !enable)
6b7c5b94 166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 167 else
6b7c5b94 168 return;
5f0b849e 169
db3ea781
SP
170 pci_write_config_dword(adapter->pdev,
171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
172}
173
68c45a2d
SK
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
8788fdc2 190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
191{
192 u32 val = 0;
193 val |= qid & DB_RQ_RING_ID_MASK;
194 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
195
196 wmb();
8788fdc2 197 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
198}
199
94d73aaa
VV
200static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201 u16 posted)
6b7c5b94
SP
202{
203 u32 val = 0;
94d73aaa 204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
206
207 wmb();
94d73aaa 208 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
209}
210
8788fdc2 211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
6b7c5b94
SP
212 bool arm, bool clear_int, u16 num_popped)
213{
214 u32 val = 0;
215 val |= qid & DB_EQ_RING_ID_MASK;
fe6d2a38
SP
216 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217 DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 218
f67ef7ba 219 if (adapter->eeh_error)
cf588477
SP
220 return;
221
6b7c5b94
SP
222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
229}
230
8788fdc2 231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
232{
233 u32 val = 0;
234 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
235 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 237
f67ef7ba 238 if (adapter->eeh_error)
cf588477
SP
239 return;
240
6b7c5b94
SP
241 if (arm)
242 val |= 1 << DB_CQ_REARM_SHIFT;
243 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 244 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
245}
246
6b7c5b94
SP
247static int be_mac_addr_set(struct net_device *netdev, void *p)
248{
249 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 250 struct device *dev = &adapter->pdev->dev;
6b7c5b94 251 struct sockaddr *addr = p;
5a712c13
SP
252 int status;
253 u8 mac[ETH_ALEN];
254 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 255
ca9e4988
AK
256 if (!is_valid_ether_addr(addr->sa_data))
257 return -EADDRNOTAVAIL;
258
5a712c13
SP
259 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
260 * privilege or if PF did not provision the new MAC address.
261 * On BE3, this cmd will always fail if the VF doesn't have the
262 * FILTMGMT privilege. This failure is OK, only if the PF programmed
263 * the MAC for the VF.
704e4c88 264 */
5a712c13
SP
265 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
266 adapter->if_handle, &adapter->pmac_id[0], 0);
267 if (!status) {
268 curr_pmac_id = adapter->pmac_id[0];
269
270 /* Delete the old programmed MAC. This call may fail if the
271 * old MAC was already deleted by the PF driver.
272 */
273 if (adapter->pmac_id[0] != old_pmac_id)
274 be_cmd_pmac_del(adapter, adapter->if_handle,
275 old_pmac_id, 0);
704e4c88
PR
276 }
277
5a712c13
SP
278 /* Decide if the new MAC is successfully activated only after
279 * querying the FW
704e4c88 280 */
5a712c13 281 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
a65027e4 282 if (status)
e3a7ae2c 283 goto err;
6b7c5b94 284
5a712c13
SP
285 /* The MAC change did not happen, either due to lack of privilege
286 * or PF didn't pre-provision.
287 */
288 if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
289 status = -EPERM;
290 goto err;
291 }
292
e3a7ae2c 293 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 294 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
295 return 0;
296err:
5a712c13 297 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
298 return status;
299}
300
ca34fe38
SP
301/* BE2 supports only v0 cmd */
302static void *hw_stats_from_cmd(struct be_adapter *adapter)
303{
304 if (BE2_chip(adapter)) {
305 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307 return &cmd->hw_stats;
308 } else {
309 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311 return &cmd->hw_stats;
312 }
313}
314
315/* BE2 supports only v0 cmd */
316static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317{
318 if (BE2_chip(adapter)) {
319 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321 return &hw_stats->erx;
322 } else {
323 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325 return &hw_stats->erx;
326 }
327}
328
329static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 330{
ac124ff9
SP
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 334 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
335 &rxf_stats->port[adapter->port_num];
336 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 337
ac124ff9 338 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
339 drvs->rx_pause_frames = port_stats->rx_pause_frames;
340 drvs->rx_crc_errors = port_stats->rx_crc_errors;
341 drvs->rx_control_frames = port_stats->rx_control_frames;
342 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 353 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
354 drvs->rx_dropped_header_too_small =
355 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
356 drvs->rx_address_filtered =
357 port_stats->rx_address_filtered +
358 port_stats->rx_vlan_filtered;
89a88ab8
AK
359 drvs->rx_alignment_symbol_errors =
360 port_stats->rx_alignment_symbol_errors;
361
362 drvs->tx_pauseframes = port_stats->tx_pauseframes;
363 drvs->tx_controlframes = port_stats->tx_controlframes;
364
365 if (adapter->port_num)
ac124ff9 366 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 367 else
ac124ff9 368 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 369 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 370 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
371 drvs->forwarded_packets = rxf_stats->forwarded_packets;
372 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
373 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
375 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376}
377
ca34fe38 378static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 379{
ac124ff9
SP
380 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 383 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
384 &rxf_stats->port[adapter->port_num];
385 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 386
ac124ff9 387 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
388 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
390 drvs->rx_pause_frames = port_stats->rx_pause_frames;
391 drvs->rx_crc_errors = port_stats->rx_crc_errors;
392 drvs->rx_control_frames = port_stats->rx_control_frames;
393 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403 drvs->rx_dropped_header_too_small =
404 port_stats->rx_dropped_header_too_small;
405 drvs->rx_input_fifo_overflow_drop =
406 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 407 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
408 drvs->rx_alignment_symbol_errors =
409 port_stats->rx_alignment_symbol_errors;
ac124ff9 410 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
411 drvs->tx_pauseframes = port_stats->tx_pauseframes;
412 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 413 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
414 drvs->jabber_events = port_stats->jabber_events;
415 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 416 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
417 drvs->forwarded_packets = rxf_stats->forwarded_packets;
418 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
419 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
421 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422}
423
005d5696
SX
424static void populate_lancer_stats(struct be_adapter *adapter)
425{
89a88ab8 426
005d5696 427 struct be_drv_stats *drvs = &adapter->drv_stats;
ac124ff9
SP
428 struct lancer_pport_stats *pport_stats =
429 pport_stats_from_cmd(adapter);
430
431 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 435 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 436 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
437 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441 drvs->rx_dropped_tcp_length =
442 pport_stats->rx_dropped_invalid_tcp_length;
443 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446 drvs->rx_dropped_header_too_small =
447 pport_stats->rx_dropped_header_too_small;
448 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
449 drvs->rx_address_filtered =
450 pport_stats->rx_address_filtered +
451 pport_stats->rx_vlan_filtered;
ac124ff9 452 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 453 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
454 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 456 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
457 drvs->forwarded_packets = pport_stats->num_forwards_lo;
458 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 459 drvs->rx_drops_too_many_frags =
ac124ff9 460 pport_stats->rx_drops_too_many_frags_lo;
005d5696 461}
89a88ab8 462
09c1c68f
SP
463static void accumulate_16bit_val(u32 *acc, u16 val)
464{
465#define lo(x) (x & 0xFFFF)
466#define hi(x) (x & 0xFFFF0000)
467 bool wrapped = val < lo(*acc);
468 u32 newacc = hi(*acc) + val;
469
470 if (wrapped)
471 newacc += 65536;
472 ACCESS_ONCE(*acc) = newacc;
473}
474
a6c578ef
AK
475void populate_erx_stats(struct be_adapter *adapter,
476 struct be_rx_obj *rxo,
477 u32 erx_stat)
478{
479 if (!BEx_chip(adapter))
480 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481 else
482 /* below erx HW counter can actually wrap around after
483 * 65535. Driver accumulates a 32-bit value
484 */
485 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486 (u16)erx_stat);
487}
488
89a88ab8
AK
489void be_parse_stats(struct be_adapter *adapter)
490{
ac124ff9
SP
491 struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492 struct be_rx_obj *rxo;
493 int i;
a6c578ef 494 u32 erx_stat;
ac124ff9 495
ca34fe38
SP
496 if (lancer_chip(adapter)) {
497 populate_lancer_stats(adapter);
005d5696 498 } else {
ca34fe38
SP
499 if (BE2_chip(adapter))
500 populate_be_v0_stats(adapter);
501 else
502 /* for BE3 and Skyhawk */
503 populate_be_v1_stats(adapter);
d51ebd33 504
ca34fe38
SP
505 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
507 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 509 }
09c1c68f 510 }
89a88ab8
AK
511}
512
ab1594e9
SP
/* ndo_get_stats64: aggregate the per-queue SW counters (read under the
 * u64_stats seqcount so 64-bit values are torn-free on 32-bit hosts) and
 * the FW-derived error counters into @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if a writer updated the counters */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
578
/* Reflect a FW-reported link state change on the netdev carrier.
 * The first call after probe forces carrier-off once so the stack starts
 * from a known state before the real status is applied.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
593
3c8def97 594static void be_tx_stats_update(struct be_tx_obj *txo,
91992e44 595 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
6b7c5b94 596{
3c8def97
SP
597 struct be_tx_stats *stats = tx_stats(txo);
598
ab1594e9 599 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
600 stats->tx_reqs++;
601 stats->tx_wrbs += wrb_cnt;
602 stats->tx_bytes += copied;
603 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 604 if (stopped)
ac124ff9 605 stats->tx_stops++;
ab1594e9 606 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
607}
608
609/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
610static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611 bool *dummy)
6b7c5b94 612{
ebc8d2ab
DM
613 int cnt = (skb->len > skb->data_len);
614
615 cnt += skb_shinfo(skb)->nr_frags;
616
6b7c5b94
SP
617 /* to account for hdr wrb */
618 cnt++;
fe6d2a38
SP
619 if (lancer_chip(adapter) || !(cnt & 1)) {
620 *dummy = false;
621 } else {
6b7c5b94
SP
622 /* add a dummy to make it an even num */
623 cnt++;
624 *dummy = true;
fe6d2a38 625 }
6b7c5b94
SP
626 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627 return cnt;
628}
629
630static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631{
632 wrb->frag_pa_hi = upper_32_bits(addr);
633 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 635 wrb->rsvd0 = 0;
6b7c5b94
SP
636}
637
1ded132d
AK
638static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639 struct sk_buff *skb)
640{
641 u8 vlan_prio;
642 u16 vlan_tag;
643
644 vlan_tag = vlan_tx_tag_get(skb);
645 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646 /* If vlan priority provided by OS is NOT in available bmap */
647 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649 adapter->recommended_prio;
650
651 return vlan_tag;
652}
653
cc4ce020 654static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
bc0c3405 655 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
6b7c5b94 656{
1ded132d 657 u16 vlan_tag;
cc4ce020 658
6b7c5b94
SP
659 memset(hdr, 0, sizeof(*hdr));
660
661 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
49e4b847 663 if (skb_is_gso(skb)) {
6b7c5b94
SP
664 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 667 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 668 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94
SP
669 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670 if (is_tcp_pkt(skb))
671 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672 else if (is_udp_pkt(skb))
673 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674 }
675
4c5102f9 676 if (vlan_tx_tag_present(skb)) {
6b7c5b94 677 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 678 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 679 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
680 }
681
bc0c3405
AK
682 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 684 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
685 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687}
688
2b7bcebf 689static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
7101e111
SP
690 bool unmap_single)
691{
692 dma_addr_t dma;
693
694 be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 697 if (wrb->frag_len) {
7101e111 698 if (unmap_single)
2b7bcebf
IV
699 dma_unmap_single(dev, dma, wrb->frag_len,
700 DMA_TO_DEVICE);
7101e111 701 else
2b7bcebf 702 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
703 }
704}
6b7c5b94 705
/* DMA-map an skb and post its WRBs (header WRB first, then one WRB per
 * linear/frag piece, plus an optional dummy pad WRB) onto @txq.
 * Returns the number of payload bytes posted, or 0 after unwinding all
 * mappings on a DMA-mapping failure.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the head slot for the header WRB; it is filled in last,
	 * once the total payload length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB — unwind restarts here */

	/* linear (head) part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB to keep the WRB count even (BE requirement) */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: rewind the queue head and unmap every posted WRB;
	 * only the first data WRB can be a dma_map_single mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
772
/* Insert the VLAN tag(s) into the packet payload in software (inner tag
 * from the skb/pvid, then the outer QnQ tag if configured), instead of
 * letting HW tag it. Returns the (possibly reallocated) skb, or NULL if
 * a copy/insertion failed — caller must treat NULL as "skb consumed".
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* the payload is modified below; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band TCI */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
815
bc0c3405
AK
816static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817{
818 struct ethhdr *eh = (struct ethhdr *)skb->data;
819 u16 offset = ETH_HLEN;
820
821 if (eh->h_proto == htons(ETH_P_IPV6)) {
822 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824 offset += sizeof(struct ipv6hdr);
825 if (ip6h->nexthdr != NEXTHDR_TCP &&
826 ip6h->nexthdr != NEXTHDR_UDP) {
827 struct ipv6_opt_hdr *ehdr =
828 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831 if (ehdr->hdrlen == 0xff)
832 return true;
833 }
834 }
835 return false;
836}
837
838static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839{
840 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841}
842
ee9c799c
SP
/* True if this skb may trigger the BE3 TX-stall erratum: only BE3 is
 * affected, and only for the IPv6 ext-header packet shape.
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
848
ee9c799c
SP
/* Apply the chain of HW/FW transmit errata workarounds to @skb before it
 * is posted. May reallocate the skb (SW VLAN insertion) or drop it
 * entirely; returns the skb to transmit, or NULL if it was consumed.
 * The order of the workarounds below is significant — do not reorder.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the padding off again.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}
925
926static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
927{
928 struct be_adapter *adapter = netdev_priv(netdev);
929 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
930 struct be_queue_info *txq = &txo->q;
931 bool dummy_wrb, stopped = false;
932 u32 wrb_cnt = 0, copied = 0;
933 bool skip_hw_vlan = false;
934 u32 start = txq->head;
935
936 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
937 if (!skb)
938 return NETDEV_TX_OK;
939
fe6d2a38 940 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
6b7c5b94 941
bc0c3405
AK
942 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
943 skip_hw_vlan);
c190e3c8 944 if (copied) {
cd8f76c0
ED
945 int gso_segs = skb_shinfo(skb)->gso_segs;
946
c190e3c8 947 /* record the sent skb in the sent_skb table */
3c8def97
SP
948 BUG_ON(txo->sent_skb_list[start]);
949 txo->sent_skb_list[start] = skb;
c190e3c8
AK
950
951 /* Ensure txq has space for the next skb; Else stop the queue
952 * *BEFORE* ringing the tx doorbell, so that we serialze the
953 * tx compls of the current transmit which'll wake up the queue
954 */
7101e111 955 atomic_add(wrb_cnt, &txq->used);
c190e3c8
AK
956 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
957 txq->len) {
3c8def97 958 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
c190e3c8
AK
959 stopped = true;
960 }
6b7c5b94 961
94d73aaa 962 be_txq_notify(adapter, txo, wrb_cnt);
6b7c5b94 963
cd8f76c0 964 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
c190e3c8
AK
965 } else {
966 txq->head = start;
967 dev_kfree_skb_any(skb);
6b7c5b94 968 }
6b7c5b94
SP
969 return NETDEV_TX_OK;
970}
971
972static int be_change_mtu(struct net_device *netdev, int new_mtu)
973{
974 struct be_adapter *adapter = netdev_priv(netdev);
975 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
976 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
977 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
978 dev_info(&adapter->pdev->dev,
979 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
980 BE_MIN_MTU,
981 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
982 return -EINVAL;
983 }
984 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
985 netdev->mtu, new_mtu);
986 netdev->mtu = new_mtu;
987 return 0;
988}
989
990/*
82903e4b
AK
991 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
992 * If the user configures more, place BE in vlan promiscuous mode.
6b7c5b94 993 */
10329df8 994static int be_vid_config(struct be_adapter *adapter)
6b7c5b94 995{
10329df8
SP
996 u16 vids[BE_NUM_VLANS_SUPPORTED];
997 u16 num = 0, i;
82903e4b 998 int status = 0;
1da87b7f 999
c0e64ef4
SP
1000 /* No need to further configure vids if in promiscuous mode */
1001 if (adapter->promiscuous)
1002 return 0;
1003
0fc16ebf
PR
1004 if (adapter->vlans_added > adapter->max_vlans)
1005 goto set_vlan_promisc;
1006
1007 /* Construct VLAN Table to give to HW */
1008 for (i = 0; i < VLAN_N_VID; i++)
1009 if (adapter->vlan_tag[i])
10329df8 1010 vids[num++] = cpu_to_le16(i);
0fc16ebf
PR
1011
1012 status = be_cmd_vlan_config(adapter, adapter->if_handle,
10329df8 1013 vids, num, 1, 0);
0fc16ebf
PR
1014
1015 /* Set to VLAN promisc mode as setting VLAN filter failed */
1016 if (status) {
1017 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
1018 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
1019 goto set_vlan_promisc;
6b7c5b94 1020 }
1da87b7f 1021
b31c50a7 1022 return status;
0fc16ebf
PR
1023
1024set_vlan_promisc:
1025 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1026 NULL, 0, 1, 1);
1027 return status;
6b7c5b94
SP
1028}
1029
80d5c368 1030static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1031{
1032 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1033 int status = 0;
6b7c5b94 1034
a85e9986 1035 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
1036 status = -EINVAL;
1037 goto ret;
1038 }
ba343c77 1039
a85e9986
PR
1040 /* Packets with VID 0 are always received by Lancer by default */
1041 if (lancer_chip(adapter) && vid == 0)
1042 goto ret;
1043
6b7c5b94 1044 adapter->vlan_tag[vid] = 1;
82903e4b 1045 if (adapter->vlans_added <= (adapter->max_vlans + 1))
10329df8 1046 status = be_vid_config(adapter);
8e586137 1047
80817cbf
AK
1048 if (!status)
1049 adapter->vlans_added++;
1050 else
1051 adapter->vlan_tag[vid] = 0;
1052ret:
1053 return status;
6b7c5b94
SP
1054}
1055
80d5c368 1056static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1057{
1058 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1059 int status = 0;
6b7c5b94 1060
a85e9986 1061 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
1062 status = -EINVAL;
1063 goto ret;
1064 }
ba343c77 1065
a85e9986
PR
1066 /* Packets with VID 0 are always received by Lancer by default */
1067 if (lancer_chip(adapter) && vid == 0)
1068 goto ret;
1069
6b7c5b94 1070 adapter->vlan_tag[vid] = 0;
82903e4b 1071 if (adapter->vlans_added <= adapter->max_vlans)
10329df8 1072 status = be_vid_config(adapter);
8e586137 1073
80817cbf
AK
1074 if (!status)
1075 adapter->vlans_added--;
1076 else
1077 adapter->vlan_tag[vid] = 1;
1078ret:
1079 return status;
6b7c5b94
SP
1080}
1081
a54769f5 1082static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
1083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
0fc16ebf 1085 int status;
6b7c5b94 1086
24307eef 1087 if (netdev->flags & IFF_PROMISC) {
5b8821b7 1088 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
1089 adapter->promiscuous = true;
1090 goto done;
6b7c5b94
SP
1091 }
1092
25985edc 1093 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
1094 if (adapter->promiscuous) {
1095 adapter->promiscuous = false;
5b8821b7 1096 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
1097
1098 if (adapter->vlans_added)
10329df8 1099 be_vid_config(adapter);
6b7c5b94
SP
1100 }
1101
e7b909a6 1102 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 1103 if (netdev->flags & IFF_ALLMULTI ||
abb93951 1104 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
5b8821b7 1105 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 1106 goto done;
6b7c5b94 1107 }
6b7c5b94 1108
fbc13f01
AK
1109 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1110 struct netdev_hw_addr *ha;
1111 int i = 1; /* First slot is claimed by the Primary MAC */
1112
1113 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1114 be_cmd_pmac_del(adapter, adapter->if_handle,
1115 adapter->pmac_id[i], 0);
1116 }
1117
1118 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1119 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1120 adapter->promiscuous = true;
1121 goto done;
1122 }
1123
1124 netdev_for_each_uc_addr(ha, adapter->netdev) {
1125 adapter->uc_macs++; /* First slot is for Primary MAC */
1126 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1127 adapter->if_handle,
1128 &adapter->pmac_id[adapter->uc_macs], 0);
1129 }
1130 }
1131
0fc16ebf
PR
1132 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1133
1134 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1135 if (status) {
1136 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1137 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1138 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1139 }
24307eef
SP
1140done:
1141 return;
6b7c5b94
SP
1142}
1143
ba343c77
SB
1144static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1145{
1146 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1147 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1148 int status;
1149
11ac75ed 1150 if (!sriov_enabled(adapter))
ba343c77
SB
1151 return -EPERM;
1152
11ac75ed 1153 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1154 return -EINVAL;
1155
3175d8c2
SP
1156 if (BEx_chip(adapter)) {
1157 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1158 vf + 1);
ba343c77 1159
11ac75ed
SP
1160 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1161 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1162 } else {
1163 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1164 vf + 1);
590c391d
PR
1165 }
1166
64600ea5 1167 if (status)
ba343c77
SB
1168 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1169 mac, vf);
64600ea5 1170 else
11ac75ed 1171 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1172
ba343c77
SB
1173 return status;
1174}
1175
64600ea5
AK
1176static int be_get_vf_config(struct net_device *netdev, int vf,
1177 struct ifla_vf_info *vi)
1178{
1179 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1180 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1181
11ac75ed 1182 if (!sriov_enabled(adapter))
64600ea5
AK
1183 return -EPERM;
1184
11ac75ed 1185 if (vf >= adapter->num_vfs)
64600ea5
AK
1186 return -EINVAL;
1187
1188 vi->vf = vf;
11ac75ed
SP
1189 vi->tx_rate = vf_cfg->tx_rate;
1190 vi->vlan = vf_cfg->vlan_tag;
64600ea5 1191 vi->qos = 0;
11ac75ed 1192 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1193
1194 return 0;
1195}
1196
1da87b7f
AK
1197static int be_set_vf_vlan(struct net_device *netdev,
1198 int vf, u16 vlan, u8 qos)
1199{
1200 struct be_adapter *adapter = netdev_priv(netdev);
1201 int status = 0;
1202
11ac75ed 1203 if (!sriov_enabled(adapter))
1da87b7f
AK
1204 return -EPERM;
1205
11ac75ed 1206 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
1207 return -EINVAL;
1208
1209 if (vlan) {
f1f3ee1b
AK
1210 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1211 /* If this is new value, program it. Else skip. */
1212 adapter->vf_cfg[vf].vlan_tag = vlan;
1213
1214 status = be_cmd_set_hsw_config(adapter, vlan,
1215 vf + 1, adapter->vf_cfg[vf].if_handle);
1216 }
1da87b7f 1217 } else {
f1f3ee1b 1218 /* Reset Transparent Vlan Tagging. */
11ac75ed 1219 adapter->vf_cfg[vf].vlan_tag = 0;
f1f3ee1b
AK
1220 vlan = adapter->vf_cfg[vf].def_vid;
1221 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1222 adapter->vf_cfg[vf].if_handle);
1da87b7f
AK
1223 }
1224
1da87b7f
AK
1225
1226 if (status)
1227 dev_info(&adapter->pdev->dev,
1228 "VLAN %d config on VF %d failed\n", vlan, vf);
1229 return status;
1230}
1231
e1d18735
AK
1232static int be_set_vf_tx_rate(struct net_device *netdev,
1233 int vf, int rate)
1234{
1235 struct be_adapter *adapter = netdev_priv(netdev);
1236 int status = 0;
1237
11ac75ed 1238 if (!sriov_enabled(adapter))
e1d18735
AK
1239 return -EPERM;
1240
94f434c2 1241 if (vf >= adapter->num_vfs)
e1d18735
AK
1242 return -EINVAL;
1243
94f434c2
AK
1244 if (rate < 100 || rate > 10000) {
1245 dev_err(&adapter->pdev->dev,
1246 "tx rate must be between 100 and 10000 Mbps\n");
1247 return -EINVAL;
1248 }
e1d18735 1249
d5c18473
PR
1250 if (lancer_chip(adapter))
1251 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1252 else
1253 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1254
1255 if (status)
94f434c2 1256 dev_err(&adapter->pdev->dev,
e1d18735 1257 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1258 else
1259 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1260 return status;
1261}
1262
10ef9ab4 1263static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 1264{
10ef9ab4 1265 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
4097f663 1266 ulong now = jiffies;
ac124ff9 1267 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
1268 u64 pkts;
1269 unsigned int start, eqd;
ac124ff9 1270
10ef9ab4
SP
1271 if (!eqo->enable_aic) {
1272 eqd = eqo->eqd;
1273 goto modify_eqd;
1274 }
1275
1276 if (eqo->idx >= adapter->num_rx_qs)
ac124ff9 1277 return;
6b7c5b94 1278
10ef9ab4
SP
1279 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1280
4097f663 1281 /* Wrapped around */
3abcdeda
SP
1282 if (time_before(now, stats->rx_jiffies)) {
1283 stats->rx_jiffies = now;
4097f663
SP
1284 return;
1285 }
6b7c5b94 1286
ac124ff9
SP
1287 /* Update once a second */
1288 if (delta < HZ)
6b7c5b94
SP
1289 return;
1290
ab1594e9
SP
1291 do {
1292 start = u64_stats_fetch_begin_bh(&stats->sync);
1293 pkts = stats->rx_pkts;
1294 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1295
68c3e5a7 1296 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1297 stats->rx_pkts_prev = pkts;
3abcdeda 1298 stats->rx_jiffies = now;
10ef9ab4
SP
1299 eqd = (stats->rx_pps / 110000) << 3;
1300 eqd = min(eqd, eqo->max_eqd);
1301 eqd = max(eqd, eqo->min_eqd);
ac124ff9
SP
1302 if (eqd < 10)
1303 eqd = 0;
10ef9ab4
SP
1304
1305modify_eqd:
1306 if (eqd != eqo->cur_eqd) {
1307 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1308 eqo->cur_eqd = eqd;
ac124ff9 1309 }
6b7c5b94
SP
1310}
1311
3abcdeda 1312static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1313 struct be_rx_compl_info *rxcp)
4097f663 1314{
ac124ff9 1315 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1316
ab1594e9 1317 u64_stats_update_begin(&stats->sync);
3abcdeda 1318 stats->rx_compl++;
2e588f84 1319 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1320 stats->rx_pkts++;
2e588f84 1321 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1322 stats->rx_mcast_pkts++;
2e588f84 1323 if (rxcp->err)
ac124ff9 1324 stats->rx_compl_err++;
ab1594e9 1325 u64_stats_update_end(&stats->sync);
4097f663
SP
1326}
1327
2e588f84 1328static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1329{
19fad86f
PR
1330 /* L4 checksum is not reliable for non TCP/UDP packets.
1331 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1332 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1333 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1334}
1335
10ef9ab4
SP
1336static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1337 u16 frag_idx)
6b7c5b94 1338{
10ef9ab4 1339 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1340 struct be_rx_page_info *rx_page_info;
3abcdeda 1341 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1342
3abcdeda 1343 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1344 BUG_ON(!rx_page_info->page);
1345
205859a2 1346 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1347 dma_unmap_page(&adapter->pdev->dev,
1348 dma_unmap_addr(rx_page_info, bus),
1349 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1350 rx_page_info->last_page_user = false;
1351 }
6b7c5b94
SP
1352
1353 atomic_dec(&rxq->used);
1354 return rx_page_info;
1355}
1356
1357/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1358static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359 struct be_rx_compl_info *rxcp)
6b7c5b94 1360{
3abcdeda 1361 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1362 struct be_rx_page_info *page_info;
2e588f84 1363 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1364
e80d9da6 1365 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1366 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1367 put_page(page_info->page);
1368 memset(page_info, 0, sizeof(*page_info));
2e588f84 1369 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1370 }
1371}
1372
1373/*
1374 * skb_fill_rx_data forms a complete skb for an ether frame
1375 * indicated by rxcp.
1376 */
10ef9ab4
SP
1377static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378 struct be_rx_compl_info *rxcp)
6b7c5b94 1379{
3abcdeda 1380 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1381 struct be_rx_page_info *page_info;
2e588f84
SP
1382 u16 i, j;
1383 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1384 u8 *start;
6b7c5b94 1385
10ef9ab4 1386 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1387 start = page_address(page_info->page) + page_info->page_offset;
1388 prefetch(start);
1389
1390 /* Copy data in the first descriptor of this completion */
2e588f84 1391 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1392
6b7c5b94
SP
1393 skb->len = curr_frag_len;
1394 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1395 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1396 /* Complete packet has now been moved to data */
1397 put_page(page_info->page);
1398 skb->data_len = 0;
1399 skb->tail += curr_frag_len;
1400 } else {
ac1ae5f3
ED
1401 hdr_len = ETH_HLEN;
1402 memcpy(skb->data, start, hdr_len);
6b7c5b94 1403 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1404 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1405 skb_shinfo(skb)->frags[0].page_offset =
1406 page_info->page_offset + hdr_len;
9e903e08 1407 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1408 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1409 skb->truesize += rx_frag_size;
6b7c5b94
SP
1410 skb->tail += hdr_len;
1411 }
205859a2 1412 page_info->page = NULL;
6b7c5b94 1413
2e588f84
SP
1414 if (rxcp->pkt_size <= rx_frag_size) {
1415 BUG_ON(rxcp->num_rcvd != 1);
1416 return;
6b7c5b94
SP
1417 }
1418
1419 /* More frags present for this completion */
2e588f84
SP
1420 index_inc(&rxcp->rxq_idx, rxq->len);
1421 remaining = rxcp->pkt_size - curr_frag_len;
1422 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
10ef9ab4 1423 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
2e588f84 1424 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1425
bd46cb6c
AK
1426 /* Coalesce all frags from the same physical page in one slot */
1427 if (page_info->page_offset == 0) {
1428 /* Fresh page */
1429 j++;
b061b39e 1430 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1431 skb_shinfo(skb)->frags[j].page_offset =
1432 page_info->page_offset;
9e903e08 1433 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1434 skb_shinfo(skb)->nr_frags++;
1435 } else {
1436 put_page(page_info->page);
1437 }
1438
9e903e08 1439 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1440 skb->len += curr_frag_len;
1441 skb->data_len += curr_frag_len;
bdb28a97 1442 skb->truesize += rx_frag_size;
2e588f84
SP
1443 remaining -= curr_frag_len;
1444 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1445 page_info->page = NULL;
6b7c5b94 1446 }
bd46cb6c 1447 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1448}
1449
5be93b9a 1450/* Process the RX completion indicated by rxcp when GRO is disabled */
10ef9ab4
SP
1451static void be_rx_compl_process(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
6b7c5b94 1453{
10ef9ab4 1454 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1455 struct net_device *netdev = adapter->netdev;
6b7c5b94 1456 struct sk_buff *skb;
89420424 1457
bb349bb4 1458 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1459 if (unlikely(!skb)) {
ac124ff9 1460 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1461 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1462 return;
1463 }
1464
10ef9ab4 1465 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1466
6332c8d3 1467 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1468 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1469 else
1470 skb_checksum_none_assert(skb);
6b7c5b94 1471
6332c8d3 1472 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1473 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1474 if (netdev->features & NETIF_F_RXHASH)
4b972914
AK
1475 skb->rxhash = rxcp->rss_hash;
1476
6b7c5b94 1477
343e43c0 1478 if (rxcp->vlanf)
86a9bad3 1479 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1480
1481 netif_receive_skb(skb);
6b7c5b94
SP
1482}
1483
5be93b9a 1484/* Process the RX completion indicated by rxcp when GRO is enabled */
10ef9ab4
SP
1485void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1486 struct be_rx_compl_info *rxcp)
6b7c5b94 1487{
10ef9ab4 1488 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1489 struct be_rx_page_info *page_info;
5be93b9a 1490 struct sk_buff *skb = NULL;
3abcdeda 1491 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1492 u16 remaining, curr_frag_len;
1493 u16 i, j;
3968fa1e 1494
10ef9ab4 1495 skb = napi_get_frags(napi);
5be93b9a 1496 if (!skb) {
10ef9ab4 1497 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1498 return;
1499 }
1500
2e588f84
SP
1501 remaining = rxcp->pkt_size;
1502 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1503 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1504
1505 curr_frag_len = min(remaining, rx_frag_size);
1506
bd46cb6c
AK
1507 /* Coalesce all frags from the same physical page in one slot */
1508 if (i == 0 || page_info->page_offset == 0) {
1509 /* First frag or Fresh page */
1510 j++;
b061b39e 1511 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1512 skb_shinfo(skb)->frags[j].page_offset =
1513 page_info->page_offset;
9e903e08 1514 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1515 } else {
1516 put_page(page_info->page);
1517 }
9e903e08 1518 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1519 skb->truesize += rx_frag_size;
bd46cb6c 1520 remaining -= curr_frag_len;
2e588f84 1521 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1522 memset(page_info, 0, sizeof(*page_info));
1523 }
bd46cb6c 1524 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1525
5be93b9a 1526 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1527 skb->len = rxcp->pkt_size;
1528 skb->data_len = rxcp->pkt_size;
5be93b9a 1529 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1530 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914
AK
1531 if (adapter->netdev->features & NETIF_F_RXHASH)
1532 skb->rxhash = rxcp->rss_hash;
5be93b9a 1533
343e43c0 1534 if (rxcp->vlanf)
86a9bad3 1535 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1536
10ef9ab4 1537 napi_gro_frags(napi);
2e588f84
SP
1538}
1539
10ef9ab4
SP
1540static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541 struct be_rx_compl_info *rxcp)
2e588f84
SP
1542{
1543 rxcp->pkt_size =
1544 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1548 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1549 rxcp->ip_csum =
1550 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1551 rxcp->l4_csum =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1553 rxcp->ipv6 =
1554 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1555 rxcp->rxq_idx =
1556 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1557 rxcp->num_rcvd =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1559 rxcp->pkt_type =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1561 rxcp->rss_hash =
c297977e 1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1563 if (rxcp->vlanf) {
1564 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1565 compl);
1566 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1567 compl);
15d72184 1568 }
12004ae9 1569 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1570}
1571
10ef9ab4
SP
1572static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573 struct be_rx_compl_info *rxcp)
2e588f84
SP
1574{
1575 rxcp->pkt_size =
1576 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1580 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1581 rxcp->ip_csum =
1582 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1583 rxcp->l4_csum =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1585 rxcp->ipv6 =
1586 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1587 rxcp->rxq_idx =
1588 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1589 rxcp->num_rcvd =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1591 rxcp->pkt_type =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1593 rxcp->rss_hash =
c297977e 1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1595 if (rxcp->vlanf) {
1596 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1597 compl);
1598 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1599 compl);
15d72184 1600 }
12004ae9 1601 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1602 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1603 ip_frag, compl);
2e588f84
SP
1604}
1605
1606static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1607{
1608 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1609 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1610 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1611
2e588f84
SP
1612 /* For checking the valid bit it is Ok to use either definition as the
1613 * valid bit is at the same position in both v0 and v1 Rx compl */
1614 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1615 return NULL;
6b7c5b94 1616
2e588f84
SP
1617 rmb();
1618 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1619
2e588f84 1620 if (adapter->be3_native)
10ef9ab4 1621 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1622 else
10ef9ab4 1623 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1624
e38b1706
SK
1625 if (rxcp->ip_frag)
1626 rxcp->l4_csum = 0;
1627
15d72184
SP
1628 if (rxcp->vlanf) {
1629 /* vlanf could be wrongly set in some cards.
1630 * ignore if vtm is not set */
752961a1 1631 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1632 rxcp->vlanf = 0;
6b7c5b94 1633
15d72184 1634 if (!lancer_chip(adapter))
3c709f8f 1635 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1636
939cf306 1637 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1638 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1639 rxcp->vlanf = 0;
1640 }
2e588f84
SP
1641
1642 /* As the compl has been parsed, reset it; we wont touch it again */
1643 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1644
3abcdeda 1645 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1646 return rxcp;
1647}
1648
1829b086 1649static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1650{
6b7c5b94 1651 u32 order = get_order(size);
1829b086 1652
6b7c5b94 1653 if (order > 0)
1829b086
ED
1654 gfp |= __GFP_COMP;
1655 return alloc_pages(gfp, order);
6b7c5b94
SP
1656}
1657
1658/*
1659 * Allocate a page, split it to fragments of size rx_frag_size and post as
1660 * receive buffers to BE
1661 */
1829b086 1662static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1663{
3abcdeda 1664 struct be_adapter *adapter = rxo->adapter;
26d92f92 1665 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1666 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1667 struct page *pagep = NULL;
1668 struct be_eth_rx_d *rxd;
1669 u64 page_dmaaddr = 0, frag_dmaaddr;
1670 u32 posted, page_offset = 0;
1671
3abcdeda 1672 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1673 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1674 if (!pagep) {
1829b086 1675 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1676 if (unlikely(!pagep)) {
ac124ff9 1677 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1678 break;
1679 }
2b7bcebf
IV
1680 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1681 0, adapter->big_page_size,
1682 DMA_FROM_DEVICE);
6b7c5b94
SP
1683 page_info->page_offset = 0;
1684 } else {
1685 get_page(pagep);
1686 page_info->page_offset = page_offset + rx_frag_size;
1687 }
1688 page_offset = page_info->page_offset;
1689 page_info->page = pagep;
fac6da5b 1690 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1691 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1692
1693 rxd = queue_head_node(rxq);
1694 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1695 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1696
1697 /* Any space left in the current big page for another frag? */
1698 if ((page_offset + rx_frag_size + rx_frag_size) >
1699 adapter->big_page_size) {
1700 pagep = NULL;
1701 page_info->last_page_user = true;
1702 }
26d92f92
SP
1703
1704 prev_page_info = page_info;
1705 queue_head_inc(rxq);
10ef9ab4 1706 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1707 }
1708 if (pagep)
26d92f92 1709 prev_page_info->last_page_user = true;
6b7c5b94
SP
1710
1711 if (posted) {
6b7c5b94 1712 atomic_add(posted, &rxq->used);
8788fdc2 1713 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1714 } else if (atomic_read(&rxq->used) == 0) {
1715 /* Let be_worker replenish when memory is available */
3abcdeda 1716 rxo->rx_post_starved = true;
6b7c5b94 1717 }
6b7c5b94
SP
1718}
1719
5fb379ee 1720static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1721{
6b7c5b94
SP
1722 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1723
1724 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1725 return NULL;
1726
f3eb62d2 1727 rmb();
6b7c5b94
SP
1728 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1729
1730 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1731
1732 queue_tail_inc(tx_cq);
1733 return txcp;
1734}
1735
3c8def97
SP
1736static u16 be_tx_compl_process(struct be_adapter *adapter,
1737 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1738{
3c8def97 1739 struct be_queue_info *txq = &txo->q;
a73b796e 1740 struct be_eth_wrb *wrb;
3c8def97 1741 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1742 struct sk_buff *sent_skb;
ec43b1a6
SP
1743 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1744 bool unmap_skb_hdr = true;
6b7c5b94 1745
ec43b1a6 1746 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1747 BUG_ON(!sent_skb);
ec43b1a6
SP
1748 sent_skbs[txq->tail] = NULL;
1749
1750 /* skip header wrb */
a73b796e 1751 queue_tail_inc(txq);
6b7c5b94 1752
ec43b1a6 1753 do {
6b7c5b94 1754 cur_index = txq->tail;
a73b796e 1755 wrb = queue_tail_node(txq);
2b7bcebf
IV
1756 unmap_tx_frag(&adapter->pdev->dev, wrb,
1757 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1758 unmap_skb_hdr = false;
1759
6b7c5b94
SP
1760 num_wrbs++;
1761 queue_tail_inc(txq);
ec43b1a6 1762 } while (cur_index != last_index);
6b7c5b94 1763
6b7c5b94 1764 kfree_skb(sent_skb);
4d586b82 1765 return num_wrbs;
6b7c5b94
SP
1766}
1767
10ef9ab4
SP
1768/* Return the number of events in the event queue */
1769static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1770{
10ef9ab4
SP
1771 struct be_eq_entry *eqe;
1772 int num = 0;
859b1e4e 1773
10ef9ab4
SP
1774 do {
1775 eqe = queue_tail_node(&eqo->q);
1776 if (eqe->evt == 0)
1777 break;
859b1e4e 1778
10ef9ab4
SP
1779 rmb();
1780 eqe->evt = 0;
1781 num++;
1782 queue_tail_inc(&eqo->q);
1783 } while (true);
1784
1785 return num;
859b1e4e
SP
1786}
1787
10ef9ab4
SP
1788/* Leaves the EQ is disarmed state */
1789static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1790{
10ef9ab4 1791 int num = events_get(eqo);
859b1e4e 1792
10ef9ab4 1793 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1794}
1795
/* Drain the RX completion queue and then free every posted-but-unused RX
 * buffer.  Must be called with RX traffic quiesced (close/teardown path).
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;
	u16 tail;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (rxcp == NULL) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or if a HW error was flagged */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
1846
/* Reap all pending TX completions on every TX queue, then forcibly free
 * any posted skbs for which a completion will never arrive (e.g. after a
 * firmware error).  Called from the close path with TX quiesced.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
						end_idx);
				cmpl++;
			}
			if (cmpl) {
				/* ack processed compls; do not re-arm CQ */
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
				&dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
1905
10ef9ab4
SP
1906static void be_evt_queues_destroy(struct be_adapter *adapter)
1907{
1908 struct be_eq_obj *eqo;
1909 int i;
1910
1911 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1912 if (eqo->q.created) {
1913 be_eq_clean(eqo);
10ef9ab4 1914 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1915 }
10ef9ab4
SP
1916 be_queue_free(adapter, &eqo->q);
1917 }
1918}
1919
/* Allocate and create one event queue per IRQ vector.
 * On a mid-loop failure the already-created queues are cleaned up later
 * by be_evt_queues_destroy() (called from the error-unwind path).
 * Returns 0 on success, a negative error/firmware status otherwise.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;	/* adaptive interrupt coalescing */

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1947
5fb379ee
SP
1948static void be_mcc_queues_destroy(struct be_adapter *adapter)
1949{
1950 struct be_queue_info *q;
5fb379ee 1951
8788fdc2 1952 q = &adapter->mcc_obj.q;
5fb379ee 1953 if (q->created)
8788fdc2 1954 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1955 be_queue_free(adapter, q);
1956
8788fdc2 1957 q = &adapter->mcc_obj.cq;
5fb379ee 1958 if (q->created)
8788fdc2 1959 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1960 be_queue_free(adapter, q);
1961}
1962
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue on the default EQ and then the MCC
 * queue itself, unwinding via gotos on any failure.
 * Returns 0 on success, -1 on failure (callers only test for non-zero).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1995
6b7c5b94
SP
1996static void be_tx_queues_destroy(struct be_adapter *adapter)
1997{
1998 struct be_queue_info *q;
3c8def97
SP
1999 struct be_tx_obj *txo;
2000 u8 i;
6b7c5b94 2001
3c8def97
SP
2002 for_all_tx_queues(adapter, txo, i) {
2003 q = &txo->q;
2004 if (q->created)
2005 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2006 be_queue_free(adapter, q);
6b7c5b94 2007
3c8def97
SP
2008 q = &txo->cq;
2009 if (q->created)
2010 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2011 be_queue_free(adapter, q);
2012 }
6b7c5b94
SP
2013}
2014
dafc0fe3
SP
2015static int be_num_txqs_want(struct be_adapter *adapter)
2016{
abb93951
PR
2017 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2018 be_is_mc(adapter) ||
2019 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 2020 BE2_chip(adapter))
dafc0fe3
SP
2021 return 1;
2022 else
abb93951 2023 return adapter->max_tx_queues;
dafc0fe3
SP
2024}
2025
/* Decide the number of TX queues, publish it to the stack, and create one
 * TX completion queue per TX object.  When there are fewer EQs than TXQs,
 * CQs are distributed round-robin over the EQs.
 * Returns 0 on success, a negative error/firmware status otherwise.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl_lock required by netif_set_real_num_tx_queues */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
6b7c5b94 2058
10ef9ab4
SP
/* Allocate and create the TX WRB queue for every TX object.
 * Returns 0 on success, a negative error/firmware status otherwise.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2079
10ef9ab4 2080static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2081{
2082 struct be_queue_info *q;
3abcdeda
SP
2083 struct be_rx_obj *rxo;
2084 int i;
2085
2086 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2087 q = &rxo->cq;
2088 if (q->created)
2089 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2090 be_queue_free(adapter, q);
ac6a0c4a
SP
2091 }
2092}
2093
/* Decide the number of RX queues, publish it to the stack, and create one
 * RX completion queue per RX object (distributed round-robin over EQs).
 * Returns 0 on success, a negative error/firmware status otherwise.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl_lock required by netif_set_real_num_rx_queues */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2132
6b7c5b94
SP
/* INTx (legacy, shared) interrupt handler.  Only EQ0 is used in INTx
 * mode; counts events and hands the EQ over to NAPI.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2164
10ef9ab4 2165static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2166{
10ef9ab4 2167 struct be_eq_obj *eqo = dev;
6b7c5b94 2168
0b545a62
SP
2169 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2170 napi_schedule(&eqo->napi);
6b7c5b94
SP
2171 return IRQ_HANDLED;
2172}
2173
2e588f84 2174static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2175{
e38b1706 2176 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2177}
2178
10ef9ab4
SP
/* NAPI RX processing for one RX ring.
 * Consumes up to @budget completions, dispatching each frame via GRO or
 * the regular path, then acks the CQ (re-armed) and replenishes RX
 * buffers if the ring is running low.  Returns the work done.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* replenish the RX ring when it falls below the watermark */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2228
10ef9ab4
SP
/* NAPI TX-completion processing for one TX ring.
 * Reaps up to @budget completions, frees the WRBs, wakes the netdev
 * subqueue @idx if it was flow-stopped, and updates stats.
 * Returns true when the ring is fully drained (work_done < budget).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2261
10ef9ab4
SP
/* NAPI poll routine for one EQ: services all TX and RX rings mapped to
 * this EQ, plus MCC completions on the MCC EQ.  Events are counted up
 * front so they can be acked together with the (re-)arm decision.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;	/* force re-poll */
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* done polling: ack events and re-arm the EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2300
/* Probe the adapter for unrecoverable hardware errors.
 * On Lancer this reads the SLIPORT status/error registers; on BE chips it
 * reads the masked UE (unrecoverable error) status words from PCI config
 * space.  Sets adapter->hw_error only for SLIPORT errors (see comment
 * below) and logs details of whatever was found.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	/* nothing to do if an error was already latched */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* only the unmasked bits indicate real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

}
2368
8d56ff11
SP
2369static void be_msix_disable(struct be_adapter *adapter)
2370{
ac6a0c4a 2371 if (msix_enabled(adapter)) {
8d56ff11 2372 pci_disable_msix(adapter->pdev);
ac6a0c4a 2373 adapter->num_msix_vec = 0;
3abcdeda
SP
2374 }
2375}
2376
10ef9ab4
SP
2377static uint be_num_rss_want(struct be_adapter *adapter)
2378{
30e80b55 2379 u32 num = 0;
abb93951 2380
10ef9ab4 2381 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2382 (lancer_chip(adapter) ||
2383 (!sriov_want(adapter) && be_physfn(adapter)))) {
2384 num = adapter->max_rss_queues;
30e80b55
YM
2385 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2386 }
2387 return num;
10ef9ab4
SP
2388}
2389
/* Enable MSI-x: request one vector per RSS ring (plus the default RXQ's
 * vector and, if supported, RoCE vectors), retrying with the count the
 * PCI core says is available.  Falls back to INTx on a PF; VFs have no
 * INTx, so failure there is fatal to probe.
 * Returns 0 on success (or on PF fallback), a negative errno otherwise.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* positive status = number of vectors available; retry */
		num_vec = status;
		status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
					 num_vec);
		if (!status)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return status;
	return 0;
done:
	/* split the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return 0;
}
2441
/* Return the MSI-x vector assigned to the given EQ object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
6b7c5b94 2447
b628bde2
SP
/* Request one IRQ per event queue (MSI-x mode).  On failure, frees the
 * IRQs registered so far and disables MSI-x.
 * Returns 0 on success, the request_irq() error otherwise.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: release the vectors registered before the failure */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2471
/* Register interrupt handlers: MSI-x when enabled, otherwise fall back
 * to shared INTx on the PF (VFs do not support INTx).
 * Returns 0 on success, a negative errno otherwise.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2499
/* Free whichever IRQs be_irq_register() set up (INTx or per-EQ MSI-x) */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2522
10ef9ab4 2523static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2524{
2525 struct be_queue_info *q;
2526 struct be_rx_obj *rxo;
2527 int i;
2528
2529 for_all_rx_queues(adapter, rxo, i) {
2530 q = &rxo->q;
2531 if (q->created) {
2532 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2533 be_rx_cq_clean(rxo);
482c9e79 2534 }
10ef9ab4 2535 be_queue_free(adapter, q);
482c9e79
SP
2536 }
2537}
2538
889cd4b2
SP
/* ndo_stop: quiesce the device.  The teardown order matters: disable
 * NAPI and async MCC first, drain TX completions, destroy RX queues,
 * then drain/disarm each EQ before unregistering IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i)
			napi_disable(&eqo->napi);
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);
	netif_tx_disable(netdev);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* make sure no handler is still running on this vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2575
/* Allocate and create the RX queues: the default RXQ first (FW
 * requirement), then the RSS rings; program the 128-entry RSS
 * indirection table when multiple rings exist, and post the initial
 * RX buffers.  Returns 0 on success, an error/firmware status otherwise.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table round-robin over the RSS rings */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on Skyhawk/Lancer, not BE2/BE3 */
		if (!BEx_chip(adapter))
			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
						RSS_ENABLE_UDP_IPV6;

		rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
				       128);
		if (rc) {
			adapter->rss_flags = 0;
			return rc;
		}
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2632
6b7c5b94
SP
/* ndo_open: bring the interface up.  Creates RX queues, registers IRQs,
 * arms all CQs, enables async MCC and NAPI, arms the EQs, and starts
 * the TX queues.  On any failure the partially-opened state is undone
 * via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2675
71d8d1b5
AK
/* Enable or disable Wake-on-LAN (magic packet).
 * When enabling, programs the device's MAC as the magic-packet filter and
 * enables D3hot/D3cold wake; when disabling, clears the filter (zero MAC)
 * and disables wake.  Returns 0 on success, non-zero on failure
 * (-1 when the DMA buffer cannot be allocated).
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL | __GFP_ZERO);
	if (cmd.va == NULL)
		return -1;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* a zero MAC disables the magic-packet filter */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2713
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last MAC programming attempt (0 on success).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 use pmac_add; newer chips program the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive MAC */
		mac[5] += 1;
	}
	return status;
}
2748
4c876616
SP
/* Query the MAC address the FW has assigned to each VF's interface and
 * cache it in vf_cfg.  Returns 0 on success or the first query error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active = false;

	for_all_vfs(adapter, vf_cfg, vf) {
		/* NOTE(review): return status of this call is ignored and
		 * 'active' is never consulted -- confirm this is intended */
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2768
/* Undo SR-IOV setup: disable SR-IOV, remove each VF's MAC and destroy
 * its FW interface, then free the per-VF config array.  If any VF is
 * still assigned to a VM, only the bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 remove the pmac; newer chips clear the MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2796
a54769f5
SP
/* Undo everything be_setup() did, in reverse order: stop the worker,
 * clear VFs, delete MAC filters and the interface, destroy all queues,
 * free pmac bookkeeping and disable MSI-X.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i;

	/* Stop the periodic worker before tearing down the queues it
	 * touches; the flag prevents a double-cancel.
	 */
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* delete the primary mac along with the uc-mac list */
	for (i = 0; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* Queue teardown: MCC first, then completion queues, TX, events */
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2828
4c876616 2829static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2830{
4c876616
SP
2831 struct be_vf_cfg *vf_cfg;
2832 u32 cap_flags, en_flags, vf;
abb93951
PR
2833 int status;
2834
4c876616
SP
2835 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2836 BE_IF_FLAGS_MULTICAST;
abb93951 2837
4c876616
SP
2838 for_all_vfs(adapter, vf_cfg, vf) {
2839 if (!BE3_chip(adapter))
a05f99db
VV
2840 be_cmd_get_profile_config(adapter, &cap_flags,
2841 NULL, vf + 1);
4c876616
SP
2842
2843 /* If a FW profile exists, then cap_flags are updated */
2844 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2845 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2846 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2847 &vf_cfg->if_handle, vf + 1);
2848 if (status)
2849 goto err;
2850 }
2851err:
2852 return status;
abb93951
PR
2853}
2854
39f1d94d 2855static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2856{
11ac75ed 2857 struct be_vf_cfg *vf_cfg;
30128031
SP
2858 int vf;
2859
39f1d94d
SP
2860 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2861 GFP_KERNEL);
2862 if (!adapter->vf_cfg)
2863 return -ENOMEM;
2864
11ac75ed
SP
2865 for_all_vfs(adapter, vf_cfg, vf) {
2866 vf_cfg->if_handle = -1;
2867 vf_cfg->pmac_id = -1;
30128031 2868 }
39f1d94d 2869 return 0;
30128031
SP
2870}
2871
f9449ab7
SP
/* Bring up SR-IOV VFs.  Two paths exist: if VFs are already enabled in
 * the PCI layer (old_vfs, e.g. after a PF driver reload) their state is
 * queried back; otherwise interfaces/MACs are created fresh and SR-IOV
 * is enabled at the end.  On any failure all VF state is rolled back
 * via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		/* VFs already enabled: honor the existing count and
		 * ignore a conflicting num_vfs module parameter.
		 */
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		/* Clamp the requested count to what the device supports */
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	/* Interfaces: query existing handles or create new ones */
	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	/* MACs: query existing addresses or generate/program new ones */
	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}

	/* Enable SR-IOV last, after VF FW state is fully configured */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2968
30128031
SP
/* Reset soft-state fields to their pre-setup defaults before (re)doing
 * the full be_setup() sequence.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;	/* unknown until link query */
	adapter->if_handle = -1;	/* no FW interface created yet */
	adapter->be3_native = false;
	adapter->promiscuous = false;
	/* PFs start with full command privileges; VFs with the minimum */
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
2981
abb93951
PR
/* Populate adapter->max_* resource limits.  Non-BEx chips can report a
 * FW function profile (profile_present); otherwise compile-time driver
 * defaults are used.  Also reads the SR-IOV TotalVFs capability into
 * adapter->dev_num_vfs.
 */
static void be_get_resources(struct be_adapter *adapter)
{
	u16 dev_num_vfs;
	int pos, status;
	bool profile_present = false;
	u16 txq_count = 0;

	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);
		if (!status)
			profile_present = true;
	} else if (BE3_chip(adapter) && be_physfn(adapter)) {
		/* BE3 PF: only the TX queue count comes from a profile */
		be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
	}

	if (profile_present) {
		/* Clamp FW-reported values to driver limits */
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		/* Keep one RX queue out of the RSS set when they'd
		 * otherwise be equal (default/non-RSS queue).
		 */
		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		/* No FW profile: derive limits from function type/mode */
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		/* In FLEX10 mode the VLAN table is shared 8 ways */
		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}

	/* Read the SR-IOV TotalVFs field from PCI config space */
	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (BE3_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
}
3060
39f1d94d
SP
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int status;

	/* Query FW config (port number, function mode/caps, ASIC rev) */
	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

err:
	return status;
}
3086
95046b92
SP
/* Establish the netdev's MAC address: on first setup fetch the
 * permanent MAC from the adapter, otherwise re-program the existing
 * dev_addr into the freshly created interface.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* On BE3 VFs this cmd may fail due to lack of privilege.
	 * Ignore the failure as in this case pmac_id is fetched
	 * in the IFACE_CREATE cmd.
	 */
	be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			&adapter->pmac_id[0], 0);
	return 0;
}
3112
5fb379ee
SP
/* Full adapter bring-up: query config, enable MSI-X, create all queues
 * (event, TX/RX CQs, MCC), create the FW interface, program the MAC,
 * apply VLAN/RX-mode/flow-control settings, set up VFs on a capable PF,
 * and start the periodic worker.  On any failure everything done so
 * far is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Queue creation order: EQs first, then CQs, then MCC */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	/* Enable only the flags the interface actually supports */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & adapter->if_cap_flags;
	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Re-apply VLAN filters that survived a reset */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Only issue the set command when FW state differs from ours */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter)) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3205
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll handler: notify every event queue and schedule its NAPI
 * context so completions are processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* Removed redundant bare `return;` at end of void function */
}
#endif
3221
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* 32-byte cookie marking the flash-section directory inside a UFI
 * image; matched against fsec->cookie in get_fsec_info().
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3224
fa9a6fed 3225static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3226 const u8 *p, u32 img_start, int image_size,
3227 int hdr_size)
fa9a6fed
SB
3228{
3229 u32 crc_offset;
3230 u8 flashed_crc[4];
3231 int status;
3f0d4560
AK
3232
3233 crc_offset = hdr_size + img_start + image_size - 4;
3234
fa9a6fed 3235 p += crc_offset;
3f0d4560
AK
3236
3237 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3238 (image_size - 4));
fa9a6fed
SB
3239 if (status) {
3240 dev_err(&adapter->pdev->dev,
3241 "could not get crc from flash, not flashing redboot\n");
3242 return false;
3243 }
3244
3245 /*update redboot only if crc does not match*/
3246 if (!memcmp(flashed_crc, p, 4))
3247 return false;
3248 else
3249 return true;
fa9a6fed
SB
3250}
3251
306f1348
SP
3252static bool phy_flashing_required(struct be_adapter *adapter)
3253{
42f11cf2
AK
3254 return (adapter->phy.phy_type == TN_8022 &&
3255 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3256}
3257
c165541e
PR
3258static bool is_comp_in_ufi(struct be_adapter *adapter,
3259 struct flash_section_info *fsec, int type)
3260{
3261 int i = 0, img_type = 0;
3262 struct flash_section_info_g2 *fsec_g2 = NULL;
3263
ca34fe38 3264 if (BE2_chip(adapter))
c165541e
PR
3265 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3266
3267 for (i = 0; i < MAX_FLASH_COMP; i++) {
3268 if (fsec_g2)
3269 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3270 else
3271 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3272
3273 if (img_type == type)
3274 return true;
3275 }
3276 return false;
3277
3278}
3279
3280struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3281 int header_size,
3282 const struct firmware *fw)
3283{
3284 struct flash_section_info *fsec = NULL;
3285 const u8 *p = fw->data;
3286
3287 p += header_size;
3288 while (p < (fw->data + fw->size)) {
3289 fsec = (struct flash_section_info *)p;
3290 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3291 return fsec;
3292 p += 32;
3293 }
3294 return NULL;
3295}
3296
773a2d7c
PR
/* Write one image section to the flash ROM in 32KB chunks through the
 * DMA-mapped flash command buffer.  All chunks but the last use a SAVE
 * operation; the final chunk issues the FLASH (commit) operation.
 * Returns 0 or the first failing command status.  A PHY-FW illegal-
 * request error is treated as "PHY flashing not applicable" and
 * silently stops the loop.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3337
/* For BE2, BE3 and BE3-R */
/* Flash a UFI image onto BE2/BE3-family adapters.  A per-generation
 * table maps each known flash component (FW images, BIOS ROMs, boot
 * code, NCSI, PHY FW) to its offset/size/optype; each component found
 * in the UFI's section directory is written with be_flash().
 */
static int be_flash_BEx(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* Pick the component table and header size for this generation */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW requires a minimum on-flash FW version */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Boot code is only rewritten when its CRC changed */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds check: component must lie inside the fw blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3447
773a2d7c
PR
3448static int be_flash_skyhawk(struct be_adapter *adapter,
3449 const struct firmware *fw,
3450 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3451{
773a2d7c
PR
3452 int status = 0, i, filehdr_size = 0;
3453 int img_offset, img_size, img_optype, redboot;
3454 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3455 const u8 *p = fw->data;
3456 struct flash_section_info *fsec = NULL;
3457
3458 filehdr_size = sizeof(struct flash_file_hdr_g3);
3459 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3460 if (!fsec) {
3461 dev_err(&adapter->pdev->dev,
3462 "Invalid Cookie. UFI corrupted ?\n");
3463 return -1;
3464 }
3465
3466 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3467 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3468 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3469
3470 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3471 case IMAGE_FIRMWARE_iSCSI:
3472 img_optype = OPTYPE_ISCSI_ACTIVE;
3473 break;
3474 case IMAGE_BOOT_CODE:
3475 img_optype = OPTYPE_REDBOOT;
3476 break;
3477 case IMAGE_OPTION_ROM_ISCSI:
3478 img_optype = OPTYPE_BIOS;
3479 break;
3480 case IMAGE_OPTION_ROM_PXE:
3481 img_optype = OPTYPE_PXE_BIOS;
3482 break;
3483 case IMAGE_OPTION_ROM_FCoE:
3484 img_optype = OPTYPE_FCOE_BIOS;
3485 break;
3486 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3487 img_optype = OPTYPE_ISCSI_BACKUP;
3488 break;
3489 case IMAGE_NCSI:
3490 img_optype = OPTYPE_NCSI_FW;
3491 break;
3492 default:
3493 continue;
3494 }
3495
3496 if (img_optype == OPTYPE_REDBOOT) {
3497 redboot = be_flash_redboot(adapter, fw->data,
3498 img_offset, img_size,
3499 filehdr_size + img_hdrs_size);
3500 if (!redboot)
3501 continue;
3502 }
3503
3504 p = fw->data;
3505 p += filehdr_size + img_offset + img_hdrs_size;
3506 if (p + img_size > fw->data + fw->size)
3507 return -1;
3508
3509 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3510 if (status) {
3511 dev_err(&adapter->pdev->dev,
3512 "Flashing section type %d failed.\n",
3513 fsec->fsec_entry[i].type);
3514 return status;
3515 }
3516 }
3517 return 0;
3f0d4560
AK
3518}
3519
485bf569
SN
/* Download firmware to a Lancer adapter: stream the image in 32KB
 * chunks to the FW's "/prg" object, then issue a zero-length write to
 * commit.  Depending on the FW's reported change_status, either trigger
 * an in-band FW reset or tell the user a reboot is needed.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object cmd transfers whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* One DMA buffer holds the cmd header plus one data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
			dev_err(&adapter->pdev->dev,
				"System reboot required for new FW"
				" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3615
ca34fe38
SP
#define UFI_TYPE2 2
#define UFI_TYPE3 3
#define UFI_TYPE3R 10
#define UFI_TYPE4 4
/* Classify a UFI image by its file-header build byte and check it is
 * compatible with this adapter's chip family.  BE3 additionally
 * distinguishes BE3-R images via asic_type_rev.  Returns a UFI_TYPE*
 * value, or -1 (with an error log) when image and chip do not match.
 */
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
3641
485bf569
SN
/* Flash a (non-Lancer) UFI firmware file: determine the UFI type from
 * the file header, then for each contained image with imageid == 1
 * dispatch to the chip-appropriate flashing routine.  A BE3 UFI is
 * refused on BE3-R hardware.  Legacy type-2 UFIs have no per-image
 * headers and are flashed after the loop.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy type-2 UFI: no image headers, flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
3710
/* Entry point for a firmware-flash request: fetch the firmware file,
 * dispatch to the Lancer or BEx/Skyhawk download path, and refresh the
 * cached FW version on success.  Requires the interface to be up.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh the cached FW version strings after a flash */
	if (!status)
		be_cmd_get_fw_ver(adapter, adapter->fw_ver,
				  adapter->fw_on_flash);

fw_exit:
	release_firmware(fw);
	return status;
}
3741
e5686ad8 3742static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3743 .ndo_open = be_open,
3744 .ndo_stop = be_close,
3745 .ndo_start_xmit = be_xmit,
a54769f5 3746 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3747 .ndo_set_mac_address = be_mac_addr_set,
3748 .ndo_change_mtu = be_change_mtu,
ab1594e9 3749 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3750 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3751 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3752 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3753 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3754 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3755 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3756 .ndo_get_vf_config = be_get_vf_config,
3757#ifdef CONFIG_NET_POLL_CONTROLLER
3758 .ndo_poll_controller = be_netpoll,
3759#endif
6b7c5b94
SP
3760};
3761
/* Initialize the net_device: advertise offload features (checksum,
 * TSO, VLAN tag handling, RX hashing on multi-queue setups), install
 * the netdev/ethtool ops, and register a NAPI context per event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
3793
3794static void be_unmap_pci_bars(struct be_adapter *adapter)
3795{
c5b3ad4c
SP
3796 if (adapter->csr)
3797 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3798 if (adapter->db)
ce66f781 3799 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3800}
3801
ce66f781
SP
/* Return the PCI BAR index that holds the doorbell registers:
 * BAR 0 on Lancer chips and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3809
3810static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3811{
dbf0f2a7 3812 if (skyhawk_chip(adapter)) {
ce66f781
SP
3813 adapter->roce_db.size = 4096;
3814 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3815 db_bar(adapter));
3816 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3817 db_bar(adapter));
3818 }
045508a8 3819 return 0;
6b7c5b94
SP
3820}
3821
/* Map the PCI BARs the driver uses: the CSR BAR (BE2/BE3 PF only, BAR 2),
 * the doorbell BAR, and - on Skyhawk - the RoCE doorbell window.
 * Returns 0 on success or -ENOMEM if an iomap fails.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	/* Cache the interface type reported by the SLI_INTF config register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	/* Only BEx physical functions expose the CSR BAR */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	/* also unmaps the CSR BAR mapped above, if any */
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3849
6b7c5b94
SP
3850static void be_ctrl_cleanup(struct be_adapter *adapter)
3851{
8788fdc2 3852 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3853
3854 be_unmap_pci_bars(adapter);
3855
3856 if (mem->va)
2b7bcebf
IV
3857 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3858 mem->dma);
e7b909a6 3859
5b8821b7 3860 mem = &adapter->rx_filter;
e7b909a6 3861 if (mem->va)
2b7bcebf
IV
3862 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3863 mem->dma);
6b7c5b94
SP
3864}
3865
6b7c5b94
SP
/* Set up everything needed to talk to the adapter: BAR mappings, the
 * 16-byte-aligned mailbox DMA buffer, the rx_filter command buffer,
 * and the mailbox/MCC locks. Unwinds via gotos on failure.
 * Returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* The hardware requires a 16-byte-aligned mailbox */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored by the EEH / error-recovery paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3924
3925static void be_stats_cleanup(struct be_adapter *adapter)
3926{
3abcdeda 3927 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3928
3929 if (cmd->va)
2b7bcebf
IV
3930 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3931 cmd->va, cmd->dma);
6b7c5b94
SP
3932}
3933
3934static int be_stats_init(struct be_adapter *adapter)
3935{
3abcdeda 3936 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3937
ca34fe38
SP
3938 if (lancer_chip(adapter))
3939 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3940 else if (BE2_chip(adapter))
89a88ab8 3941 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3942 else
3943 /* BE3 and Skyhawk */
3944 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3945
2b7bcebf 3946 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
1f9061d2 3947 GFP_KERNEL | __GFP_ZERO);
6b7c5b94
SP
3948 if (cmd->va == NULL)
3949 return -1;
3950 return 0;
3951}
3952
/* PCI remove callback: tear down in roughly the reverse order of
 * be_probe(). free_netdev() must come last - 'adapter' is embedded in
 * the netdev's private area.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe never completed */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* stop the recovery worker before unregistering the netdev */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3984
4762f6ce
AK
3985bool be_is_wol_supported(struct be_adapter *adapter)
3986{
3987 return ((adapter->wol_cap & BE_WOL_CAP) &&
3988 !be_is_wol_excluded(adapter)) ? true : false;
3989}
3990
941a77d5
SK
3991u32 be_get_fw_log_level(struct be_adapter *adapter)
3992{
3993 struct be_dma_mem extfat_cmd;
3994 struct be_fat_conf_params *cfgs;
3995 int status;
3996 u32 level = 0;
3997 int j;
3998
f25b119c
PR
3999 if (lancer_chip(adapter))
4000 return 0;
4001
941a77d5
SK
4002 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4003 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4004 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4005 &extfat_cmd.dma);
4006
4007 if (!extfat_cmd.va) {
4008 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4009 __func__);
4010 goto err;
4011 }
4012
4013 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4014 if (!status) {
4015 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4016 sizeof(struct be_cmd_resp_hdr));
ac46a462 4017 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
4018 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4019 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4020 }
4021 }
4022 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4023 extfat_cmd.dma);
4024err:
4025 return level;
4026}
abb93951 4027
/* Query one-time configuration from the firmware at probe time:
 * controller attributes, WoL capability, and the FW log level (used to
 * derive the initial msg_enable mask). Returns 0 or a negative status.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabillities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
4056
f67ef7ba 4057static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4058{
01e5b2c4 4059 struct device *dev = &adapter->pdev->dev;
d8110f62 4060 int status;
d8110f62 4061
f67ef7ba
PR
4062 status = lancer_test_and_set_rdy_state(adapter);
4063 if (status)
4064 goto err;
d8110f62 4065
f67ef7ba
PR
4066 if (netif_running(adapter->netdev))
4067 be_close(adapter->netdev);
d8110f62 4068
f67ef7ba
PR
4069 be_clear(adapter);
4070
01e5b2c4 4071 be_clear_all_error(adapter);
f67ef7ba
PR
4072
4073 status = be_setup(adapter);
4074 if (status)
4075 goto err;
d8110f62 4076
f67ef7ba
PR
4077 if (netif_running(adapter->netdev)) {
4078 status = be_open(adapter->netdev);
d8110f62
PR
4079 if (status)
4080 goto err;
f67ef7ba 4081 }
d8110f62 4082
01e5b2c4 4083 dev_err(dev, "Error recovery successful\n");
f67ef7ba
PR
4084 return 0;
4085err:
01e5b2c4
SK
4086 if (status == -EAGAIN)
4087 dev_err(dev, "Waiting for resource provisioning\n");
4088 else
4089 dev_err(dev, "Error recovery failed\n");
d8110f62 4090
f67ef7ba
PR
4091 return status;
4092}
4093
/* Periodic (1s) worker that polls for adapter errors and, on Lancer,
 * drives the recovery sequence. Reschedules itself unless recovery
 * failed with a non-retryable status.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* detach under rtnl so the stack stops using the device */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4120
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues stats/temperature queries, replenishes
 * starved RX rings, and updates adaptive EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* only one stats request may be outstanding at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* re-post buffers on any RX ring that ran dry */
	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4163
257a3feb 4164/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4165static bool be_reset_required(struct be_adapter *adapter)
4166{
257a3feb 4167 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4168}
4169
d379142b
SP
4170static char *mc_name(struct be_adapter *adapter)
4171{
4172 if (adapter->function_mode & FLEX10_MODE)
4173 return "FLEX10";
4174 else if (adapter->function_mode & VNIC_MODE)
4175 return "vNIC";
4176 else if (adapter->function_mode & UMC_ENABLED)
4177 return "UMC";
4178 else
4179 return "";
4180}
4181
/* "PF" or "VF" depending on the function type, for the probe banner. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4186
/* PCI probe: bring up one adapter function. Allocates the netdev,
 * configures DMA masks, initializes control structures, syncs with the
 * firmware, sets up the function, and registers the net device.
 * Unwinds through the labels at the bottom on failure.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (!status)
			status = dma_set_coherent_mask(&pdev->dev,
						       DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues if it is unavailable */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_info(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4315
/* Legacy PM suspend: optionally arm WoL, stop the recovery worker,
 * detach and close the interface, then power the device down.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4339
/* Legacy PM resume: power up, restore PCI config space, re-sync with
 * firmware, re-initialize the function, and reopen the interface if it
 * was running before suspend.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	/* restart the recovery worker stopped by be_suspend() */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4376
82456b03
SP
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe never completed */
	if (!adapter)
		return;

	/* stop both workers before quiescing the device */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4396
cf588477
SP
/* EEH callback: a PCI channel error was detected. Quiesce the device
 * once (guarded by eeh_error), then tell the EEH core whether a slot
 * reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* this callback can fire more than once; quiesce only once */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4435
/* EEH slot-reset callback: re-enable the device after the slot was
 * reset and wait for the firmware to become ready again.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4462
/* EEH resume callback: traffic may flow again. Reset and re-initialize
 * the function, reopen the interface if it was running, and restart the
 * recovery worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4499
/* PCI error (EEH/AER) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4505
6b7c5b94
SP
/* PCI driver registration table */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4516
4517static int __init be_init_module(void)
4518{
8e95a202
JP
4519 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4520 rx_frag_size != 2048) {
6b7c5b94
SP
4521 printk(KERN_WARNING DRV_NAME
4522 " : Module param rx_frag_size must be 2048/4096/8192."
4523 " Using 2048\n");
4524 rx_frag_size = 2048;
4525 }
6b7c5b94
SP
4526
4527 return pci_register_driver(&be_driver);
4528}
4529module_init(be_init_module);
4530
/* Module unload: unregister the PCI driver; per-device teardown happens
 * through be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);