/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
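
/*
 * Note on the two doorbell helpers above: the wmb() ensures that the
 * ring entries written by the CPU are visible in memory before the
 * doorbell write (iowrite32) tells the adapter how many new entries
 * to fetch; without the barrier the device could fetch stale
 * descriptors. The EQ/CQ doorbells below use the same register scheme
 * but encode arm/clear-interrupt bits instead of a post count.
 */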

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For BE VF, MAC address is already activated by PF.
         * Hence only operation left is updating netdev->devaddr.
         * Update it if user is passing the same MAC which was used
         * during configuring VF MAC from PF(Hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                port_stats->rx_address_mismatch_drops +
                                port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 448
09c1c68f
SP
449static void accumulate_16bit_val(u32 *acc, u16 val)
450{
451#define lo(x) (x & 0xFFFF)
452#define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459}
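
/*
 * Worked example for the wrap handling above (illustrative values):
 * if *acc == 0x0001FFF0 (hi 16 bits record 1 wrap, lo == 0xFFF0) and
 * the 16-bit HW counter next reads val == 0x0005, then val < lo(*acc)
 * flags a wrap and newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005,
 * i.e. the accumulated 32-bit count keeps growing monotonically even
 * though the HW counter itself wrapped at 65535.
 */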

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                        (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}
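
/*
 * The do/while loops in be_get_stats64() above use the u64_stats
 * seqcount helpers (u64_stats_fetch_begin_bh/u64_stats_fetch_retry_bh)
 * to read a consistent {pkts, bytes} snapshot: if the writer side (the
 * u64_stats_update_begin/end pairs in the stats-update helpers) runs
 * concurrently, the sequence count changes and the read is retried.
 * This matters mainly on 32-bit hosts, where a 64-bit counter cannot
 * be read atomically in a single load.
 */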

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
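
/*
 * Example: an skb with a non-empty linear area and two page frags
 * needs 1 + 2 data WRBs plus the header WRB = 4, which is already
 * even, so no dummy WRB is added. With only one page frag the count
 * would be 3; on BE2/BE3 (but not Lancer, per the check above) a
 * zero-length dummy WRB is then appended so the queue always advances
 * in even-sized steps.
 */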

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}
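
/*
 * TX ring layout produced by make_tx_wrbs(): one header WRB (filled in
 * last, once the total copied length is known), then one WRB for the
 * skb's linear area (mapped with dma_map_single), one WRB per page
 * frag (mapped with skb_frag_dma_map), and optionally a zero-length
 * dummy WRB. On a mapping failure the dma_err path rewinds txq->head
 * to map_head and unmaps exactly what was mapped so far; only the
 * first entry unwound is treated as a single mapping.
 */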

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}
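
/*
 * Adaptive interrupt coalescing: once a second be_eqd_update() turns
 * the measured RX packet rate into an EQ delay, eqd = (pps / 110000)
 * << 3, clamped to [eqo->min_eqd, eqo->max_eqd]. For example, ~440K
 * pps gives (440000 / 110000) << 3 = 32; any result below 10 is
 * rounded down to 0, i.e. no coalescing at low rates. The firmware is
 * only told about the new value when it actually changes.
 */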

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}
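
/*
 * Several RX fragments are carved out of one "big" page (see
 * be_post_rx_frags() below); each fragment holds its own reference on
 * the page via get_page(). last_page_user marks the final fragment
 * carved from a given page, so the DMA mapping for the whole page is
 * torn down here exactly once, when that last fragment is consumed.
 */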

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(compl, rxcp);
        else
                be_parse_rx_compl_v0(compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}
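
/*
 * be_rx_compl_get() polls the completion ring rather than reading an
 * index register: an entry becomes valid once the adapter DMAs a
 * non-zero 'valid' dword into it. The rmb() orders the valid-bit read
 * before the reads of the rest of the entry, and zeroing the valid
 * dword afterwards recycles the slot so a stale entry is never parsed
 * twice.
 */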

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &rxo->page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
1609
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

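/* Editorial note: a TX completion reports only the index of the last wrb
 * of a packet, so the walk below starts at the queue tail (the hdr wrb),
 * unmaps each fragment wrb, and stops once it has consumed the wrb at
 * last_index; the returned count lets the caller debit txq->used in bulk.
 */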
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

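/* Editorial note: events_get() consumes EQ entries with the same handshake
 * the CQ helpers use (test the entry, rmb(), zero it, advance the tail);
 * the count it returns is what be_eq_notify() later acks to the hardware.
 */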
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

/* Leaves the EQ in a disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

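/* Editorial note: the drain below first gives the hardware up to 200ms to
 * return completions on its own; anything still marked in-use after that
 * can never complete, so it is reclaimed by hand through the same
 * be_tx_compl_process() path, with wrb_cnt_for_skb() recomputing how many
 * wrbs each stranded skb occupies.
 */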
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
					AMAP_GET_BITS(struct amap_eth_tx_compl,
						      wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
	    be_is_mc(adapter) ||
	    (!lancer_chip(adapter) && !be_physfn(adapter)) ||
	    BE2_chip(adapter))
		return 1;
	else
		return adapter->max_tx_queues;
}

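/* Editorial example of the "i % num_evt_qs" mapping used below: with 8 TX
 * queues and 4 event queues, TXQs 0 and 4 land on EQ0, TXQs 1 and 5 on
 * EQ1, and so on, so each EQ services an equal share of the TX CQs.
 */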
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
					     adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* On Lancer, clear-intr bit of the EQ DB does not work.
	 * INTx is de-asserted only on notifying num evts.
	 */
	if (lancer_chip(adapter))
		num_evts = events_get(eqo);

	/* The EQ-notify may not de-assert INTx right away, causing
	 * the ISR to be invoked again. So, return HANDLED even when
	 * num_evts is zero.
	 */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compls with partial DMA (Lancer B0) */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}

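/* Editorial note on the NAPI contract honoured below: returning less than
 * budget tells the core this poll is done, so napi_complete() plus a
 * re-arming EQ notify is issued only in that case; otherwise events are
 * counted and cleared but the EQ is deliberately left unarmed for the next
 * poll pass.
 */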
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed.
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

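/* Editorial note: in the UE decode below, each set bit i of the masked
 * ue_lo/ue_hi words indexes ue_status_low_desc[]/ue_status_hi_desc[], so a
 * masked word of 0x5, for example, reports the blocks at indices 0 and 2.
 */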
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence hw_error is not set on UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    (lancer_chip(adapter) ||
	     (!sriov_want(adapter) && be_physfn(adapter)))) {
		num = adapter->max_rss_queues;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}

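/* Editorial note: with the legacy pci_enable_msix() API used below, a
 * positive return value is not an errno but the number of vectors the
 * platform could actually provide, which is why the failure path retries
 * once with that smaller count before giving up on MSI-x.
 */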
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
				     (num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				    num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSI-x enable failed\n");
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else {
		adapter->num_msix_vec = num_vec;
	}
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all DMA to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

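/* Editorial note on the teardown order below: NAPI is disabled before the
 * TX drain so no completions are reaped concurrently, every vector is
 * synchronized and its EQ cleaned (outstanding events counted and acked
 * while disarmed) before the IRQs are finally unregistered.
 */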
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

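/* Editorial example of the 128-entry RSS table fill below: with 4 RSS
 * rings whose rss_ids are r0..r3, the outer loop steps j by 4 and the
 * inner loop writes r0 r1 r2 r3 at slots j..j+3, yielding the repeating
 * pattern r0 r1 r2 r3 r0 r1 r2 r3 ... across all 128 slots.
 */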
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
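/* Editorial example with a hypothetical seed: if the generated seed is
 * 02:00:00:10:20:30, VF0 is assigned ...:30, VF1 ...:31, and so on via
 * mac[5] += 1; only the last octet advances, so the scheme assumes fewer
 * than 256 VFs per seed.
 */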
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"MAC address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}

static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
				   u32 *cap_flags, u8 domain)
{
	bool profile_present = false;
	int status;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_profile_config(adapter, cap_flags, domain);
		if (!status)
			profile_present = true;
	}

	if (!profile_present)
		*cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			     BE_IF_FLAGS_MULTICAST;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}

/* Routine to query per-function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos, status;
	u16 dev_num_vfs;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary MAC needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
err:
	return status;
}

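/* Editorial note on the bring-up order in be_setup() below: per-function
 * limits are queried first, EQs are created before the CQs that bind to
 * them, the MCC queues follow since their CQ rides an existing EQ, and the
 * interface is created before any MAC is programmed on it; be_clear()
 * unwinds in roughly the reverse order.
 */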
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	en_flags &= adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			     const u8 *p, u32 img_start, int image_size,
			     int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
				      (image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get CRC from flash, not flashing redboot\n");
		return false;
	}

	/* Update redboot only if the CRC does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

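/* Editorial note: be_flash() below streams an image in 32KB chunks; every
 * chunk except the last is written with a SAVE opcode, and only the final
 * chunk uses the FLASH (or PHY_FLASH) opcode that commits the accumulated
 * data to the flash part.
 */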
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}

/* For BE2 and BE3 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

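/* Editorial note: the Lancer download below is two-phase; the image is
 * streamed in 32KB write_object chunks and, only if every chunk succeeded,
 * committed with a zero-length write at the final offset. change_status
 * then tells the driver whether a FW reset or a full reboot is needed.
 */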
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

ca34fe38
SP
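/* Match the UFI generation (encoded in the file header's build string)
 * against the adapter family; mismatches are rejected as incompatible.
 */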
#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return UFI_TYPE3;
	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

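/* Flash a BE2/BE3/Skyhawk UFI image: locate the per-image headers and
 * dispatch to the flashing routine for the detected UFI type.
 */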
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

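/* Load a firmware file from userspace and flash it; invoked through
 * ethtool's flash-device path.
 */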
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

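/* Advertise the offloads the HW supports (hw_features) and turn them on
 * by default; VLAN RX and filtering are enabled but not user-toggleable.
 */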
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

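/* Doorbell BAR: 0 on Lancer and on VFs, 4 for other PFs. */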
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

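/* Allocate the resources needed to talk to the controller: PCI BARs,
 * the bootstrap mailbox (16-byte aligned, hence the over-allocation)
 * and the RX filter command buffer.
 */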
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

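/* Size the stats DMA buffer for the stats-command variant this chip
 * speaks before allocating it.
 */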
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

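/* Read the UART trace level from the FW's extended FAT configuration;
 * not supported on Lancer, where 0 is returned.
 */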
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

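/* Lancer SLIPORT recovery: wait for the FW to become ready again, then
 * tear the function down and rebuild it from scratch.
 */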
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

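/* Runs every second: look for adapter errors and, on Lancer, attempt
 * in-place recovery unless an EEH error already owns the device.
 */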
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

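/* Periodic housekeeping: reap MCC completions while the interface is
 * down, refresh stats and die temperature, replenish starved RX queues
 * and adapt EQ interrupt delays.
 */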
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

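/* PCI probe: enable the device, pick a DMA mask, set up control
 * structures, sync with the FW and register the netdev.
 */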
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

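/* PCI EEH callbacks: quiesce the function on error, reset the slot,
 * then rebuild and reattach it.
 */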
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for the first function, as this is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

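/* Validate the rx_frag_size module parameter before registering the
 * PCI driver.
 */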
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);