]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
Linux 3.9-rc8
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
d2145cde 2 * Copyright (C) 2005 - 2011 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
6b7c5b94
SP
24
25MODULE_VERSION(DRV_VER);
26MODULE_DEVICE_TABLE(pci, be_dev_ids);
27MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 28MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
29MODULE_LICENSE("GPL");
30
ba343c77 31static unsigned int num_vfs;
ba343c77 32module_param(num_vfs, uint, S_IRUGO);
ba343c77 33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 34
11ac75ed
SP
35static ushort rx_frag_size = 2048;
36module_param(rx_frag_size, ushort, S_IRUGO);
37MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
6b7c5b94 39static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
48 { 0 }
49};
50MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 51/* UE Status Low CSR */
42c8b11e 52static const char * const ue_status_low_desc[] = {
7c185276
AK
53 "CEV",
54 "CTX",
55 "DBUF",
56 "ERX",
57 "Host",
58 "MPU",
59 "NDMA",
60 "PTC ",
61 "RDMA ",
62 "RXF ",
63 "RXIPS ",
64 "RXULP0 ",
65 "RXULP1 ",
66 "RXULP2 ",
67 "TIM ",
68 "TPOST ",
69 "TPRE ",
70 "TXIPS ",
71 "TXULP0 ",
72 "TXULP1 ",
73 "UC ",
74 "WDMA ",
75 "TXULP2 ",
76 "HOST1 ",
77 "P0_OB_LINK ",
78 "P1_OB_LINK ",
79 "HOST_GPIO ",
80 "MBOX ",
81 "AXGMAC0",
82 "AXGMAC1",
83 "JTAG",
84 "MPU_INTPEND"
85};
86/* UE Status High CSR */
42c8b11e 87static const char * const ue_status_hi_desc[] = {
7c185276
AK
88 "LPCMEMHOST",
89 "MGMT_MAC",
90 "PCS0ONLINE",
91 "MPU_IRAM",
92 "PCS1ONLINE",
93 "PCTL0",
94 "PCTL1",
95 "PMEM",
96 "RR",
97 "TXPB",
98 "RXPP",
99 "XAUI",
100 "TXP",
101 "ARM",
102 "IPC",
103 "HOST2",
104 "HOST3",
105 "HOST4",
106 "HOST5",
107 "HOST6",
108 "HOST7",
109 "HOST8",
110 "HOST9",
42c8b11e 111 "NETC",
7c185276
AK
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown"
120};
6b7c5b94 121
752961a1
SP
122/* Is BE in a multi-channel mode */
123static inline bool be_is_mc(struct be_adapter *adapter) {
124 return (adapter->function_mode & FLEX10_MODE ||
125 adapter->function_mode & VNIC_MODE ||
126 adapter->function_mode & UMC_ENABLED);
127}
128
6b7c5b94
SP
129static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130{
131 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 132 if (mem->va) {
2b7bcebf
IV
133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
1cfafab9
SP
135 mem->va = NULL;
136 }
6b7c5b94
SP
137}
138
/* Initialize a queue descriptor and allocate zeroed DMA-coherent memory
 * for @len entries of @entry_size bytes each.
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	/* HW expects the ring to start out clean */
	memset(mem->va, 0, mem->size);
	return 0;
}
155
/* Enable/disable host interrupts via the HOSTINTR bit of the membar
 * interrupt-control register in PCI config space.  No-op when the bit is
 * already in the requested state or after an EEH error.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* device is unusable after an EEH (PCI error recovery) event */
	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
177
8788fdc2 178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
179{
180 u32 val = 0;
181 val |= qid & DB_RQ_RING_ID_MASK;
182 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
183
184 wmb();
8788fdc2 185 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
186}
187
8788fdc2 188static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
189{
190 u32 val = 0;
191 val |= qid & DB_TXULP_RING_ID_MASK;
192 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
193
194 wmb();
8788fdc2 195 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
6b7c5b94
SP
196}
197
/* Ring the event-queue doorbell for @qid: acknowledge @num_popped consumed
 * event entries and optionally re-arm the EQ and/or clear the interrupt.
 * Skipped entirely after an EEH error.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
217
/* Ring the completion-queue doorbell for @qid: acknowledge @num_popped
 * consumed CQ entries and optionally re-arm the CQ.  Skipped after an
 * EEH error.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	/* upper ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
233
6b7c5b94
SP
/* ndo_set_mac_address handler.  Validates the new address, programs it
 * into the interface's PMAC table (adding the new entry before deleting
 * the old one so the port is never without a MAC), and updates
 * netdev->dev_addr.  BE VFs cannot change their MAC (it is set by the PF);
 * for them only a matching re-set of the PF-assigned MAC is accepted.
 * Returns 0 on success or a negative error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* nothing to do if the address is unchanged */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* new MAC is in place; retire the previously active entry */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
287
ca34fe38
SP
288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
/* Copy the v0 (BE2) firmware stats response into the driver's generic
 * be_drv_stats, converting from LE to CPU byte order first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 splits address/vlan mismatch drops; the driver stat is the sum */
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port, not per function */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
364
/* Copy the v1 (BE3/Skyhawk) firmware stats response into the driver's
 * generic be_drv_stats, converting from LE to CPU byte order first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a combined mismatch-drop counter */
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
409
005d5696
SX
/* Copy the Lancer per-port (pport) firmware stats into the driver's
 * generic be_drv_stats, converting from LE to CPU byte order first.
 * Lancer keeps 64-bit counters; only the low 32 bits (_lo) are folded
 * into the 32-bit driver stats here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* driver stat is the sum of address and vlan mismatch drops */
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 448
09c1c68f
SP
449static void accumulate_16bit_val(u32 *acc, u16 val)
450{
451#define lo(x) (x & 0xFFFF)
452#define hi(x) (x & 0xFFFF0000)
453 bool wrapped = val < lo(*acc);
454 u32 newacc = hi(*acc) + val;
455
456 if (wrapped)
457 newacc += 65536;
458 ACCESS_ONCE(*acc) = newacc;
459}
460
89a88ab8
AK
/* Parse the last firmware stats response into driver stats, dispatching
 * to the chip-specific populate routine, and fold the wrapping per-queue
 * erx drop counters into 32-bit accumulators (non-Lancer only).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					     (u16)erx->rx_drops_no_fragments \
					     [rxo->q.id]);
		}
	}
}
487
ab1594e9
SP
/* ndo_get_stats64 handler.  Sums per-queue RX/TX packet and byte counts
 * (read consistently via the u64_stats seqcount retry loop) and derives
 * the aggregate error counters from the cached driver stats.
 * Returns @stats for the caller's convenience.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until pkts/bytes are read without a concurrent update */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
553
/* Propagate a firmware link-status event to the netdev carrier state.
 * The first call after probe forces carrier off before applying the
 * reported state, so the stack starts from a known-down carrier.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* first status update since init: start from carrier-off */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
568
/* Account one transmit on @txo: one request, @wrb_cnt descriptors,
 * @copied bytes, gso_segs packets (1 for non-GSO) and an optional
 * queue-stop event.  Writers are serialized via the u64_stats sync.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	/* a GSO skb counts as gso_segs wire packets */
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
583
584/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38
SP
585static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
586 bool *dummy)
6b7c5b94 587{
ebc8d2ab
DM
588 int cnt = (skb->len > skb->data_len);
589
590 cnt += skb_shinfo(skb)->nr_frags;
591
6b7c5b94
SP
592 /* to account for hdr wrb */
593 cnt++;
fe6d2a38
SP
594 if (lancer_chip(adapter) || !(cnt & 1)) {
595 *dummy = false;
596 } else {
6b7c5b94
SP
597 /* add a dummy to make it an even num */
598 cnt++;
599 *dummy = true;
fe6d2a38 600 }
6b7c5b94
SP
601 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
602 return cnt;
603}
604
/* Fill one TX fragment descriptor (WRB) with a buffer's DMA address,
 * split into hi/lo 32-bit halves, and its length.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
612
1ded132d
AK
613static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
614 struct sk_buff *skb)
615{
616 u8 vlan_prio;
617 u16 vlan_tag;
618
619 vlan_tag = vlan_tx_tag_get(skb);
620 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
621 /* If vlan priority provided by OS is NOT in available bmap */
622 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
623 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
624 adapter->recommended_prio;
625
626 return vlan_tag;
627}
628
93040ae5
SK
629static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
630{
631 return vlan_tx_tag_present(skb) || adapter->pvid;
632}
633
cc4ce020
SK
/* Build the per-packet header WRB: offload flags (LSO/checksum), VLAN
 * tag, total WRB count and payload length, encoded via the AMAP bit-field
 * helpers.  @wrb_cnt and @len describe the whole transmit.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* Lancer does not use the separate lso6 flag */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
667
/* DMA-unmap the buffer described by one (LE-formatted) TX WRB.
 * @unmap_single selects dma_unmap_single (linear skb data) vs
 * dma_unmap_page (page fragment).  Dummy WRBs (frag_len == 0) are skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	/* WRB was written to HW in LE; convert back to read the fields */
	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
6b7c5b94 684
/* Map the skb's linear data and page fragments for DMA and write the
 * corresponding WRBs (plus an optional dummy WRB and the header WRB) into
 * @txq.  On a mapping failure, all WRBs written so far are unmapped and
 * the queue head is rewound.
 * Returns the number of payload bytes queued, or 0 on failure.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; fill it last, once
	 * the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB used only to pad to an even wrb count */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every WRB written since map_head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;	/* only the first WRB is a single map */
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
750
93040ae5
SK
/* Move a pending HW-accelerated VLAN tag into the packet payload itself
 * (used to work around a HW checksum bug with offloaded VLAN tags).
 * May replace the skb; returns the (possibly new) skb, or NULL if the
 * share-check or tag insertion failed and the skb was consumed.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		skb = __vlan_put_tag(skb, vlan_tag);
		if (skb)
			skb->vlan_tci = 0;	/* tag is now inline, not offloaded */
	}

	return skb;
}
769
/* ndo_start_xmit handler.  Applies two HW-bug workarounds (trim padded
 * short IPv4 frames; insert the VLAN tag inline when checksum offload is
 * off), maps the skb into TX WRBs, stops the subqueue if it may not fit
 * another max-sized skb, and rings the TX doorbell.
 * Always returns NETDEV_TX_OK; on failure the skb is freed/dropped.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
			is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* mapping failed: rewind the ring head and drop the skb */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}
835
836static int be_change_mtu(struct net_device *netdev, int new_mtu)
837{
838 struct be_adapter *adapter = netdev_priv(netdev);
839 if (new_mtu < BE_MIN_MTU ||
34a89b8c
AK
840 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
841 (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94
SP
842 dev_info(&adapter->pdev->dev,
843 "MTU must be between %d and %d bytes\n",
34a89b8c
AK
844 BE_MIN_MTU,
845 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
846 return -EINVAL;
847 }
848 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
849 netdev->mtu, new_mtu);
850 netdev->mtu = new_mtu;
851 return 0;
852}
853
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than HW filters: fall back to vlan-promiscuous mode */
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}
893
/* ndo_vlan_rx_add_vid handler: record @vid in the driver's vlan table and
 * reprogram the HW VLAN filter.  BE VFs may not manage VLANs (-EINVAL);
 * VID 0 on Lancer needs no programming (always received by default).
 * On failure the table entry is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on failure */
ret:
	return status;
}
919
8e586137 920static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
6b7c5b94
SP
921{
922 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 923 int status = 0;
6b7c5b94 924
a85e9986 925 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
80817cbf
AK
926 status = -EINVAL;
927 goto ret;
928 }
ba343c77 929
a85e9986
PR
930 /* Packets with VID 0 are always received by Lancer by default */
931 if (lancer_chip(adapter) && vid == 0)
932 goto ret;
933
6b7c5b94 934 adapter->vlan_tag[vid] = 0;
82903e4b 935 if (adapter->vlans_added <= adapter->max_vlans)
10329df8 936 status = be_vid_config(adapter);
8e586137 937
80817cbf
AK
938 if (!status)
939 adapter->vlans_added--;
940 else
941 adapter->vlan_tag[vid] = 1;
942ret:
943 return status;
6b7c5b94
SP
944}
945
a54769f5 946static void be_set_rx_mode(struct net_device *netdev)
6b7c5b94
SP
947{
948 struct be_adapter *adapter = netdev_priv(netdev);
0fc16ebf 949 int status;
6b7c5b94 950
24307eef 951 if (netdev->flags & IFF_PROMISC) {
5b8821b7 952 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
24307eef
SP
953 adapter->promiscuous = true;
954 goto done;
6b7c5b94
SP
955 }
956
25985edc 957 /* BE was previously in promiscuous mode; disable it */
24307eef
SP
958 if (adapter->promiscuous) {
959 adapter->promiscuous = false;
5b8821b7 960 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
c0e64ef4
SP
961
962 if (adapter->vlans_added)
10329df8 963 be_vid_config(adapter);
6b7c5b94
SP
964 }
965
e7b909a6 966 /* Enable multicast promisc if num configured exceeds what we support */
4cd24eaf 967 if (netdev->flags & IFF_ALLMULTI ||
abb93951 968 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
5b8821b7 969 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
24307eef 970 goto done;
6b7c5b94 971 }
6b7c5b94 972
fbc13f01
AK
973 if (netdev_uc_count(netdev) != adapter->uc_macs) {
974 struct netdev_hw_addr *ha;
975 int i = 1; /* First slot is claimed by the Primary MAC */
976
977 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
978 be_cmd_pmac_del(adapter, adapter->if_handle,
979 adapter->pmac_id[i], 0);
980 }
981
982 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
983 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
984 adapter->promiscuous = true;
985 goto done;
986 }
987
988 netdev_for_each_uc_addr(ha, adapter->netdev) {
989 adapter->uc_macs++; /* First slot is for Primary MAC */
990 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
991 adapter->if_handle,
992 &adapter->pmac_id[adapter->uc_macs], 0);
993 }
994 }
995
0fc16ebf
PR
996 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
997
998 /* Set to MCAST promisc mode if setting MULTICAST address fails */
999 if (status) {
1000 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1001 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1002 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1003 }
24307eef
SP
1004done:
1005 return;
6b7c5b94
SP
1006}
1007
ba343c77
SB
1008static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1009{
1010 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1011 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77 1012 int status;
704e4c88
PR
1013 bool active_mac = false;
1014 u32 pmac_id;
1015 u8 old_mac[ETH_ALEN];
ba343c77 1016
11ac75ed 1017 if (!sriov_enabled(adapter))
ba343c77
SB
1018 return -EPERM;
1019
11ac75ed 1020 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1021 return -EINVAL;
1022
590c391d 1023 if (lancer_chip(adapter)) {
704e4c88
PR
1024 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1025 &pmac_id, vf + 1);
1026 if (!status && active_mac)
1027 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1028 pmac_id, vf + 1);
1029
590c391d
PR
1030 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
1031 } else {
11ac75ed
SP
1032 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1033 vf_cfg->pmac_id, vf + 1);
ba343c77 1034
11ac75ed
SP
1035 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1036 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
1037 }
1038
64600ea5 1039 if (status)
ba343c77
SB
1040 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1041 mac, vf);
64600ea5 1042 else
11ac75ed 1043 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1044
ba343c77
SB
1045 return status;
1046}
1047
64600ea5
AK
1048static int be_get_vf_config(struct net_device *netdev, int vf,
1049 struct ifla_vf_info *vi)
1050{
1051 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1052 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1053
11ac75ed 1054 if (!sriov_enabled(adapter))
64600ea5
AK
1055 return -EPERM;
1056
11ac75ed 1057 if (vf >= adapter->num_vfs)
64600ea5
AK
1058 return -EINVAL;
1059
1060 vi->vf = vf;
11ac75ed
SP
1061 vi->tx_rate = vf_cfg->tx_rate;
1062 vi->vlan = vf_cfg->vlan_tag;
64600ea5 1063 vi->qos = 0;
11ac75ed 1064 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
64600ea5
AK
1065
1066 return 0;
1067}
1068
1da87b7f
AK
1069static int be_set_vf_vlan(struct net_device *netdev,
1070 int vf, u16 vlan, u8 qos)
1071{
1072 struct be_adapter *adapter = netdev_priv(netdev);
1073 int status = 0;
1074
11ac75ed 1075 if (!sriov_enabled(adapter))
1da87b7f
AK
1076 return -EPERM;
1077
11ac75ed 1078 if (vf >= adapter->num_vfs || vlan > 4095)
1da87b7f
AK
1079 return -EINVAL;
1080
1081 if (vlan) {
f1f3ee1b
AK
1082 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1083 /* If this is new value, program it. Else skip. */
1084 adapter->vf_cfg[vf].vlan_tag = vlan;
1085
1086 status = be_cmd_set_hsw_config(adapter, vlan,
1087 vf + 1, adapter->vf_cfg[vf].if_handle);
1088 }
1da87b7f 1089 } else {
f1f3ee1b 1090 /* Reset Transparent Vlan Tagging. */
11ac75ed 1091 adapter->vf_cfg[vf].vlan_tag = 0;
f1f3ee1b
AK
1092 vlan = adapter->vf_cfg[vf].def_vid;
1093 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1094 adapter->vf_cfg[vf].if_handle);
1da87b7f
AK
1095 }
1096
1da87b7f
AK
1097
1098 if (status)
1099 dev_info(&adapter->pdev->dev,
1100 "VLAN %d config on VF %d failed\n", vlan, vf);
1101 return status;
1102}
1103
e1d18735
AK
1104static int be_set_vf_tx_rate(struct net_device *netdev,
1105 int vf, int rate)
1106{
1107 struct be_adapter *adapter = netdev_priv(netdev);
1108 int status = 0;
1109
11ac75ed 1110 if (!sriov_enabled(adapter))
e1d18735
AK
1111 return -EPERM;
1112
94f434c2 1113 if (vf >= adapter->num_vfs)
e1d18735
AK
1114 return -EINVAL;
1115
94f434c2
AK
1116 if (rate < 100 || rate > 10000) {
1117 dev_err(&adapter->pdev->dev,
1118 "tx rate must be between 100 and 10000 Mbps\n");
1119 return -EINVAL;
1120 }
e1d18735 1121
d5c18473
PR
1122 if (lancer_chip(adapter))
1123 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1124 else
1125 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
e1d18735
AK
1126
1127 if (status)
94f434c2 1128 dev_err(&adapter->pdev->dev,
e1d18735 1129 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1130 else
1131 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1132 return status;
1133}
1134
39f1d94d
SP
1135static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1136{
1137 struct pci_dev *dev, *pdev = adapter->pdev;
2f6a0260 1138 int vfs = 0, assigned_vfs = 0, pos;
39f1d94d
SP
1139 u16 offset, stride;
1140
1141 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
d79c0a20
SP
1142 if (!pos)
1143 return 0;
39f1d94d
SP
1144 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1145 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1146
1147 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1148 while (dev) {
2f6a0260 1149 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
39f1d94d
SP
1150 vfs++;
1151 if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1152 assigned_vfs++;
1153 }
1154 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1155 }
1156 return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1157}
1158
10ef9ab4 1159static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
6b7c5b94 1160{
10ef9ab4 1161 struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
4097f663 1162 ulong now = jiffies;
ac124ff9 1163 ulong delta = now - stats->rx_jiffies;
ab1594e9
SP
1164 u64 pkts;
1165 unsigned int start, eqd;
ac124ff9 1166
10ef9ab4
SP
1167 if (!eqo->enable_aic) {
1168 eqd = eqo->eqd;
1169 goto modify_eqd;
1170 }
1171
1172 if (eqo->idx >= adapter->num_rx_qs)
ac124ff9 1173 return;
6b7c5b94 1174
10ef9ab4
SP
1175 stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1176
4097f663 1177 /* Wrapped around */
3abcdeda
SP
1178 if (time_before(now, stats->rx_jiffies)) {
1179 stats->rx_jiffies = now;
4097f663
SP
1180 return;
1181 }
6b7c5b94 1182
ac124ff9
SP
1183 /* Update once a second */
1184 if (delta < HZ)
6b7c5b94
SP
1185 return;
1186
ab1594e9
SP
1187 do {
1188 start = u64_stats_fetch_begin_bh(&stats->sync);
1189 pkts = stats->rx_pkts;
1190 } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1191
68c3e5a7 1192 stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
ab1594e9 1193 stats->rx_pkts_prev = pkts;
3abcdeda 1194 stats->rx_jiffies = now;
10ef9ab4
SP
1195 eqd = (stats->rx_pps / 110000) << 3;
1196 eqd = min(eqd, eqo->max_eqd);
1197 eqd = max(eqd, eqo->min_eqd);
ac124ff9
SP
1198 if (eqd < 10)
1199 eqd = 0;
10ef9ab4
SP
1200
1201modify_eqd:
1202 if (eqd != eqo->cur_eqd) {
1203 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1204 eqo->cur_eqd = eqd;
ac124ff9 1205 }
6b7c5b94
SP
1206}
1207
3abcdeda 1208static void be_rx_stats_update(struct be_rx_obj *rxo,
2e588f84 1209 struct be_rx_compl_info *rxcp)
4097f663 1210{
ac124ff9 1211 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1212
ab1594e9 1213 u64_stats_update_begin(&stats->sync);
3abcdeda 1214 stats->rx_compl++;
2e588f84 1215 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1216 stats->rx_pkts++;
2e588f84 1217 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1218 stats->rx_mcast_pkts++;
2e588f84 1219 if (rxcp->err)
ac124ff9 1220 stats->rx_compl_err++;
ab1594e9 1221 u64_stats_update_end(&stats->sync);
4097f663
SP
1222}
1223
2e588f84 1224static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1225{
19fad86f
PR
1226 /* L4 checksum is not reliable for non TCP/UDP packets.
1227 * Also ignore ipcksm for ipv6 pkts */
2e588f84
SP
1228 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1229 (rxcp->ip_csum || rxcp->ipv6);
728a9972
AK
1230}
1231
10ef9ab4
SP
1232static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1233 u16 frag_idx)
6b7c5b94 1234{
10ef9ab4 1235 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1236 struct be_rx_page_info *rx_page_info;
3abcdeda 1237 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1238
3abcdeda 1239 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1240 BUG_ON(!rx_page_info->page);
1241
205859a2 1242 if (rx_page_info->last_page_user) {
2b7bcebf
IV
1243 dma_unmap_page(&adapter->pdev->dev,
1244 dma_unmap_addr(rx_page_info, bus),
1245 adapter->big_page_size, DMA_FROM_DEVICE);
205859a2
AK
1246 rx_page_info->last_page_user = false;
1247 }
6b7c5b94
SP
1248
1249 atomic_dec(&rxq->used);
1250 return rx_page_info;
1251}
1252
1253/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1254static void be_rx_compl_discard(struct be_rx_obj *rxo,
1255 struct be_rx_compl_info *rxcp)
6b7c5b94 1256{
3abcdeda 1257 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1258 struct be_rx_page_info *page_info;
2e588f84 1259 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1260
e80d9da6 1261 for (i = 0; i < num_rcvd; i++) {
10ef9ab4 1262 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
e80d9da6
PR
1263 put_page(page_info->page);
1264 memset(page_info, 0, sizeof(*page_info));
2e588f84 1265 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1266 }
1267}
1268
1269/*
1270 * skb_fill_rx_data forms a complete skb for an ether frame
1271 * indicated by rxcp.
1272 */
10ef9ab4
SP
1273static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1274 struct be_rx_compl_info *rxcp)
6b7c5b94 1275{
3abcdeda 1276 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1277 struct be_rx_page_info *page_info;
2e588f84
SP
1278 u16 i, j;
1279 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1280 u8 *start;
6b7c5b94 1281
10ef9ab4 1282 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1283 start = page_address(page_info->page) + page_info->page_offset;
1284 prefetch(start);
1285
1286 /* Copy data in the first descriptor of this completion */
2e588f84 1287 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1288
6b7c5b94
SP
1289 skb->len = curr_frag_len;
1290 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1291 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1292 /* Complete packet has now been moved to data */
1293 put_page(page_info->page);
1294 skb->data_len = 0;
1295 skb->tail += curr_frag_len;
1296 } else {
ac1ae5f3
ED
1297 hdr_len = ETH_HLEN;
1298 memcpy(skb->data, start, hdr_len);
6b7c5b94 1299 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1300 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1301 skb_shinfo(skb)->frags[0].page_offset =
1302 page_info->page_offset + hdr_len;
9e903e08 1303 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
6b7c5b94 1304 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1305 skb->truesize += rx_frag_size;
6b7c5b94
SP
1306 skb->tail += hdr_len;
1307 }
205859a2 1308 page_info->page = NULL;
6b7c5b94 1309
2e588f84
SP
1310 if (rxcp->pkt_size <= rx_frag_size) {
1311 BUG_ON(rxcp->num_rcvd != 1);
1312 return;
6b7c5b94
SP
1313 }
1314
1315 /* More frags present for this completion */
2e588f84
SP
1316 index_inc(&rxcp->rxq_idx, rxq->len);
1317 remaining = rxcp->pkt_size - curr_frag_len;
1318 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
10ef9ab4 1319 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
2e588f84 1320 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1321
bd46cb6c
AK
1322 /* Coalesce all frags from the same physical page in one slot */
1323 if (page_info->page_offset == 0) {
1324 /* Fresh page */
1325 j++;
b061b39e 1326 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1327 skb_shinfo(skb)->frags[j].page_offset =
1328 page_info->page_offset;
9e903e08 1329 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1330 skb_shinfo(skb)->nr_frags++;
1331 } else {
1332 put_page(page_info->page);
1333 }
1334
9e903e08 1335 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1336 skb->len += curr_frag_len;
1337 skb->data_len += curr_frag_len;
bdb28a97 1338 skb->truesize += rx_frag_size;
2e588f84
SP
1339 remaining -= curr_frag_len;
1340 index_inc(&rxcp->rxq_idx, rxq->len);
205859a2 1341 page_info->page = NULL;
6b7c5b94 1342 }
bd46cb6c 1343 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1344}
1345
5be93b9a 1346/* Process the RX completion indicated by rxcp when GRO is disabled */
10ef9ab4
SP
1347static void be_rx_compl_process(struct be_rx_obj *rxo,
1348 struct be_rx_compl_info *rxcp)
6b7c5b94 1349{
10ef9ab4 1350 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1351 struct net_device *netdev = adapter->netdev;
6b7c5b94 1352 struct sk_buff *skb;
89420424 1353
bb349bb4 1354 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1355 if (unlikely(!skb)) {
ac124ff9 1356 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1357 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1358 return;
1359 }
1360
10ef9ab4 1361 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1362
6332c8d3 1363 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1364 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1365 else
1366 skb_checksum_none_assert(skb);
6b7c5b94 1367
6332c8d3 1368 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1369 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1370 if (netdev->features & NETIF_F_RXHASH)
4b972914
AK
1371 skb->rxhash = rxcp->rss_hash;
1372
6b7c5b94 1373
343e43c0 1374 if (rxcp->vlanf)
4c5102f9
AK
1375 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1376
1377 netif_receive_skb(skb);
6b7c5b94
SP
1378}
1379
5be93b9a 1380/* Process the RX completion indicated by rxcp when GRO is enabled */
10ef9ab4
SP
1381void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1382 struct be_rx_compl_info *rxcp)
6b7c5b94 1383{
10ef9ab4 1384 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1385 struct be_rx_page_info *page_info;
5be93b9a 1386 struct sk_buff *skb = NULL;
3abcdeda 1387 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1388 u16 remaining, curr_frag_len;
1389 u16 i, j;
3968fa1e 1390
10ef9ab4 1391 skb = napi_get_frags(napi);
5be93b9a 1392 if (!skb) {
10ef9ab4 1393 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1394 return;
1395 }
1396
2e588f84
SP
1397 remaining = rxcp->pkt_size;
1398 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1399 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1400
1401 curr_frag_len = min(remaining, rx_frag_size);
1402
bd46cb6c
AK
1403 /* Coalesce all frags from the same physical page in one slot */
1404 if (i == 0 || page_info->page_offset == 0) {
1405 /* First frag or Fresh page */
1406 j++;
b061b39e 1407 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1408 skb_shinfo(skb)->frags[j].page_offset =
1409 page_info->page_offset;
9e903e08 1410 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1411 } else {
1412 put_page(page_info->page);
1413 }
9e903e08 1414 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1415 skb->truesize += rx_frag_size;
bd46cb6c 1416 remaining -= curr_frag_len;
2e588f84 1417 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1418 memset(page_info, 0, sizeof(*page_info));
1419 }
bd46cb6c 1420 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1421
5be93b9a 1422 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1423 skb->len = rxcp->pkt_size;
1424 skb->data_len = rxcp->pkt_size;
5be93b9a 1425 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1426 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914
AK
1427 if (adapter->netdev->features & NETIF_F_RXHASH)
1428 skb->rxhash = rxcp->rss_hash;
5be93b9a 1429
343e43c0 1430 if (rxcp->vlanf)
4c5102f9
AK
1431 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1432
10ef9ab4 1433 napi_gro_frags(napi);
2e588f84
SP
1434}
1435
10ef9ab4
SP
1436static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1437 struct be_rx_compl_info *rxcp)
2e588f84
SP
1438{
1439 rxcp->pkt_size =
1440 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1441 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1442 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1443 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1444 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1445 rxcp->ip_csum =
1446 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1447 rxcp->l4_csum =
1448 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1449 rxcp->ipv6 =
1450 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1451 rxcp->rxq_idx =
1452 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1453 rxcp->num_rcvd =
1454 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1455 rxcp->pkt_type =
1456 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1457 rxcp->rss_hash =
c297977e 1458 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1459 if (rxcp->vlanf) {
1460 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1461 compl);
1462 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1463 compl);
15d72184 1464 }
12004ae9 1465 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1466}
1467
10ef9ab4
SP
1468static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1469 struct be_rx_compl_info *rxcp)
2e588f84
SP
1470{
1471 rxcp->pkt_size =
1472 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1473 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1474 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1475 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1476 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1477 rxcp->ip_csum =
1478 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1479 rxcp->l4_csum =
1480 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1481 rxcp->ipv6 =
1482 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1483 rxcp->rxq_idx =
1484 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1485 rxcp->num_rcvd =
1486 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1487 rxcp->pkt_type =
1488 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1489 rxcp->rss_hash =
c297977e 1490 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1491 if (rxcp->vlanf) {
1492 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1493 compl);
1494 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1495 compl);
15d72184 1496 }
12004ae9 1497 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1498}
1499
1500static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1501{
1502 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1503 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1504 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1505
2e588f84
SP
1506 /* For checking the valid bit it is Ok to use either definition as the
1507 * valid bit is at the same position in both v0 and v1 Rx compl */
1508 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1509 return NULL;
6b7c5b94 1510
2e588f84
SP
1511 rmb();
1512 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1513
2e588f84 1514 if (adapter->be3_native)
10ef9ab4 1515 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1516 else
10ef9ab4 1517 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1518
15d72184
SP
1519 if (rxcp->vlanf) {
1520 /* vlanf could be wrongly set in some cards.
1521 * ignore if vtm is not set */
752961a1 1522 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1523 rxcp->vlanf = 0;
6b7c5b94 1524
15d72184 1525 if (!lancer_chip(adapter))
3c709f8f 1526 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1527
939cf306 1528 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1529 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1530 rxcp->vlanf = 0;
1531 }
2e588f84
SP
1532
1533 /* As the compl has been parsed, reset it; we wont touch it again */
1534 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1535
3abcdeda 1536 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1537 return rxcp;
1538}
1539
1829b086 1540static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1541{
6b7c5b94 1542 u32 order = get_order(size);
1829b086 1543
6b7c5b94 1544 if (order > 0)
1829b086
ED
1545 gfp |= __GFP_COMP;
1546 return alloc_pages(gfp, order);
6b7c5b94
SP
1547}
1548
1549/*
1550 * Allocate a page, split it to fragments of size rx_frag_size and post as
1551 * receive buffers to BE
1552 */
1829b086 1553static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1554{
3abcdeda 1555 struct be_adapter *adapter = rxo->adapter;
26d92f92 1556 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1557 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1558 struct page *pagep = NULL;
1559 struct be_eth_rx_d *rxd;
1560 u64 page_dmaaddr = 0, frag_dmaaddr;
1561 u32 posted, page_offset = 0;
1562
3abcdeda 1563 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1564 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1565 if (!pagep) {
1829b086 1566 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1567 if (unlikely(!pagep)) {
ac124ff9 1568 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1569 break;
1570 }
2b7bcebf
IV
1571 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1572 0, adapter->big_page_size,
1573 DMA_FROM_DEVICE);
6b7c5b94
SP
1574 page_info->page_offset = 0;
1575 } else {
1576 get_page(pagep);
1577 page_info->page_offset = page_offset + rx_frag_size;
1578 }
1579 page_offset = page_info->page_offset;
1580 page_info->page = pagep;
fac6da5b 1581 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1582 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1583
1584 rxd = queue_head_node(rxq);
1585 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1586 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1587
1588 /* Any space left in the current big page for another frag? */
1589 if ((page_offset + rx_frag_size + rx_frag_size) >
1590 adapter->big_page_size) {
1591 pagep = NULL;
1592 page_info->last_page_user = true;
1593 }
26d92f92
SP
1594
1595 prev_page_info = page_info;
1596 queue_head_inc(rxq);
10ef9ab4 1597 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1598 }
1599 if (pagep)
26d92f92 1600 prev_page_info->last_page_user = true;
6b7c5b94
SP
1601
1602 if (posted) {
6b7c5b94 1603 atomic_add(posted, &rxq->used);
8788fdc2 1604 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1605 } else if (atomic_read(&rxq->used) == 0) {
1606 /* Let be_worker replenish when memory is available */
3abcdeda 1607 rxo->rx_post_starved = true;
6b7c5b94 1608 }
6b7c5b94
SP
1609}
1610
5fb379ee 1611static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1612{
6b7c5b94
SP
1613 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1614
1615 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1616 return NULL;
1617
f3eb62d2 1618 rmb();
6b7c5b94
SP
1619 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1620
1621 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1622
1623 queue_tail_inc(tx_cq);
1624 return txcp;
1625}
1626
3c8def97
SP
1627static u16 be_tx_compl_process(struct be_adapter *adapter,
1628 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1629{
3c8def97 1630 struct be_queue_info *txq = &txo->q;
a73b796e 1631 struct be_eth_wrb *wrb;
3c8def97 1632 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1633 struct sk_buff *sent_skb;
ec43b1a6
SP
1634 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1635 bool unmap_skb_hdr = true;
6b7c5b94 1636
ec43b1a6 1637 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1638 BUG_ON(!sent_skb);
ec43b1a6
SP
1639 sent_skbs[txq->tail] = NULL;
1640
1641 /* skip header wrb */
a73b796e 1642 queue_tail_inc(txq);
6b7c5b94 1643
ec43b1a6 1644 do {
6b7c5b94 1645 cur_index = txq->tail;
a73b796e 1646 wrb = queue_tail_node(txq);
2b7bcebf
IV
1647 unmap_tx_frag(&adapter->pdev->dev, wrb,
1648 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1649 unmap_skb_hdr = false;
1650
6b7c5b94
SP
1651 num_wrbs++;
1652 queue_tail_inc(txq);
ec43b1a6 1653 } while (cur_index != last_index);
6b7c5b94 1654
6b7c5b94 1655 kfree_skb(sent_skb);
4d586b82 1656 return num_wrbs;
6b7c5b94
SP
1657}
1658
10ef9ab4
SP
1659/* Return the number of events in the event queue */
1660static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1661{
10ef9ab4
SP
1662 struct be_eq_entry *eqe;
1663 int num = 0;
859b1e4e 1664
10ef9ab4
SP
1665 do {
1666 eqe = queue_tail_node(&eqo->q);
1667 if (eqe->evt == 0)
1668 break;
859b1e4e 1669
10ef9ab4
SP
1670 rmb();
1671 eqe->evt = 0;
1672 num++;
1673 queue_tail_inc(&eqo->q);
1674 } while (true);
1675
1676 return num;
859b1e4e
SP
1677}
1678
10ef9ab4
SP
1679/* Leaves the EQ is disarmed state */
1680static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1681{
10ef9ab4 1682 int num = events_get(eqo);
859b1e4e 1683
10ef9ab4 1684 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1685}
1686
10ef9ab4 1687static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1688{
1689 struct be_rx_page_info *page_info;
3abcdeda
SP
1690 struct be_queue_info *rxq = &rxo->q;
1691 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1692 struct be_rx_compl_info *rxcp;
d23e946c
SP
1693 struct be_adapter *adapter = rxo->adapter;
1694 int flush_wait = 0;
6b7c5b94
SP
1695 u16 tail;
1696
d23e946c
SP
1697 /* Consume pending rx completions.
1698 * Wait for the flush completion (identified by zero num_rcvd)
1699 * to arrive. Notify CQ even when there are no more CQ entries
1700 * for HW to flush partially coalesced CQ entries.
1701 * In Lancer, there is no need to wait for flush compl.
1702 */
1703 for (;;) {
1704 rxcp = be_rx_compl_get(rxo);
1705 if (rxcp == NULL) {
1706 if (lancer_chip(adapter))
1707 break;
1708
1709 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1710 dev_warn(&adapter->pdev->dev,
1711 "did not receive flush compl\n");
1712 break;
1713 }
1714 be_cq_notify(adapter, rx_cq->id, true, 0);
1715 mdelay(1);
1716 } else {
1717 be_rx_compl_discard(rxo, rxcp);
1718 be_cq_notify(adapter, rx_cq->id, true, 1);
1719 if (rxcp->num_rcvd == 0)
1720 break;
1721 }
6b7c5b94
SP
1722 }
1723
d23e946c
SP
1724 /* After cleanup, leave the CQ in unarmed state */
1725 be_cq_notify(adapter, rx_cq->id, false, 0);
1726
1727 /* Then free posted rx buffers that were not used */
6b7c5b94 1728 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1729 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1730 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1731 put_page(page_info->page);
1732 memset(page_info, 0, sizeof(*page_info));
1733 }
1734 BUG_ON(atomic_read(&rxq->used));
482c9e79 1735 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1736}
1737
0ae57bb3 1738static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1739{
0ae57bb3
SP
1740 struct be_tx_obj *txo;
1741 struct be_queue_info *txq;
a8e9179a 1742 struct be_eth_tx_compl *txcp;
4d586b82 1743 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1744 struct sk_buff *sent_skb;
1745 bool dummy_wrb;
0ae57bb3 1746 int i, pending_txqs;
a8e9179a
SP
1747
1748 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1749 do {
0ae57bb3
SP
1750 pending_txqs = adapter->num_tx_qs;
1751
1752 for_all_tx_queues(adapter, txo, i) {
1753 txq = &txo->q;
1754 while ((txcp = be_tx_compl_get(&txo->cq))) {
1755 end_idx =
1756 AMAP_GET_BITS(struct amap_eth_tx_compl,
1757 wrb_index, txcp);
1758 num_wrbs += be_tx_compl_process(adapter, txo,
1759 end_idx);
1760 cmpl++;
1761 }
1762 if (cmpl) {
1763 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1764 atomic_sub(num_wrbs, &txq->used);
1765 cmpl = 0;
1766 num_wrbs = 0;
1767 }
1768 if (atomic_read(&txq->used) == 0)
1769 pending_txqs--;
a8e9179a
SP
1770 }
1771
0ae57bb3 1772 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1773 break;
1774
1775 mdelay(1);
1776 } while (true);
1777
0ae57bb3
SP
1778 for_all_tx_queues(adapter, txo, i) {
1779 txq = &txo->q;
1780 if (atomic_read(&txq->used))
1781 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1782 atomic_read(&txq->used));
1783
1784 /* free posted tx for which compls will never arrive */
1785 while (atomic_read(&txq->used)) {
1786 sent_skb = txo->sent_skb_list[txq->tail];
1787 end_idx = txq->tail;
1788 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1789 &dummy_wrb);
1790 index_adv(&end_idx, num_wrbs - 1, txq->len);
1791 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1792 atomic_sub(num_wrbs, &txq->used);
1793 }
b03388d6 1794 }
6b7c5b94
SP
1795}
1796
10ef9ab4
SP
/* Tear down all event queues: drain pending events, destroy the EQ in FW
 * (only if it was actually created), then free the host ring memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Flush any outstanding events before destroying */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		/* Host-side queue memory is freed unconditionally */
		be_queue_free(adapter, &eqo->q);
	}
}
1810
/* Allocate and create one event queue per IRQ vector.
 * Returns 0 on success or a negative error code; on failure the caller is
 * expected to clean up via be_evt_queues_destroy() (partially created
 * queues are not rolled back here).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		/* Adaptive interrupt coalescing enabled by default */
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}
1838
5fb379ee
SP
/* Destroy the MCC (mailbox command) queue and its completion queue.
 * The MCCQ must be destroyed before its CQ.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
1853
1854/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue (bound to the default EQ) and then the
 * MCC queue itself. Uses goto-based unwinding so each failure point frees
 * exactly what was set up before it. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1886
6b7c5b94
SP
/* Destroy every TX queue and its completion queue.
 * For each txo: TXQ is destroyed before its CQ; host memory is always freed.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1905
dafc0fe3
SP
1906static int be_num_txqs_want(struct be_adapter *adapter)
1907{
abb93951
PR
1908 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1909 be_is_mc(adapter) ||
1910 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1911 BE2_chip(adapter))
dafc0fe3
SP
1912 return 1;
1913 else
abb93951 1914 return adapter->max_tx_queues;
dafc0fe3
SP
1915}
1916
/* Allocate and create one TX completion queue per TX queue, distributing
 * the CQs round-robin over the available event queues. Also informs the
 * stack of the real TX queue count when it differs from MAX_TX_QS.
 * Returns 0 on success or a negative error code.
 */
static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		/* rtnl lock required by netif_set_real_num_tx_queues() */
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
6b7c5b94 1949
10ef9ab4
SP
/* Allocate and create the TX work-request rings (one per txo); the
 * matching CQs must already exist (see be_tx_cqs_create()).
 * Returns 0 on success or a negative error code.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
1970
/* Destroy all RX completion queues and free their host memory. */
static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
1984
/* Size the RX queue set (RSS rings + 1 default queue when multiple IRQs
 * are available, otherwise a single queue) and create a completion queue
 * for each, spread round-robin over the event queues.
 * Returns 0 on success or a negative error code.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		/* rtnl lock required by netif_set_real_num_rx_queues() */
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2023
6b7c5b94
SP
/* Legacy INTx interrupt handler: counts pending EQ entries, schedules NAPI,
 * and notifies/rearm-skips the EQ. Tracks spurious interrupts so the kernel
 * doesn't disable the (shared) line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack consumed events without rearming (NAPI will rearm later) */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2055
/* MSI-X interrupt handler: disarm-notify the EQ and hand off to NAPI. */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2064
2e588f84 2065static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2066{
2e588f84 2067 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2068}
2069
10ef9ab4
SP
/* NAPI RX path: consume up to @budget completions from @rxo's CQ, route
 * each packet to GRO or the regular receive path, then ack the CQ and
 * replenish RX buffers if the ring is running low.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		/* Stats are updated even for discarded/flush completions */
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* Refill the RX ring before it runs dry */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
2119
10ef9ab4
SP
/* Reap up to @budget TX completions from @txo, release the consumed WRBs,
 * and wake the corresponding netdev subqueue if it had been stopped for
 * lack of WRBs. Returns true when the CQ was fully drained within budget.
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2152
10ef9ab4
SP
/* NAPI poll handler for one event queue: services all TX and RX queues
 * mapped to this EQ (round-robin by index stride num_evt_qs), processes
 * MCC completions on the MCC EQ, and either completes NAPI and rearms the
 * EQ or stays in polling mode.
 * Returns the amount of work done (== budget keeps polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	bool tx_done;

	num_evts = events_get(eqo);

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* Rearm the EQ and ack the events counted above */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2191
f67ef7ba 2192void be_detect_error(struct be_adapter *adapter)
7c185276 2193{
e1cfb67a
PR
2194 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2195 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2196 u32 i;
2197
d23e946c 2198 if (be_hw_error(adapter))
72f02485
SP
2199 return;
2200
e1cfb67a
PR
2201 if (lancer_chip(adapter)) {
2202 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2203 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2204 sliport_err1 = ioread32(adapter->db +
2205 SLIPORT_ERROR1_OFFSET);
2206 sliport_err2 = ioread32(adapter->db +
2207 SLIPORT_ERROR2_OFFSET);
2208 }
2209 } else {
2210 pci_read_config_dword(adapter->pdev,
2211 PCICFG_UE_STATUS_LOW, &ue_lo);
2212 pci_read_config_dword(adapter->pdev,
2213 PCICFG_UE_STATUS_HIGH, &ue_hi);
2214 pci_read_config_dword(adapter->pdev,
2215 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2216 pci_read_config_dword(adapter->pdev,
2217 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2218
f67ef7ba
PR
2219 ue_lo = (ue_lo & ~ue_lo_mask);
2220 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2221 }
7c185276 2222
1451ae6e
AK
2223 /* On certain platforms BE hardware can indicate spurious UEs.
2224 * Allow the h/w to stop working completely in case of a real UE.
2225 * Hence not setting the hw_error for UE detection.
2226 */
2227 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2228 adapter->hw_error = true;
434b3648 2229 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2230 "Error detected in the card\n");
2231 }
2232
2233 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2234 dev_err(&adapter->pdev->dev,
2235 "ERR: sliport status 0x%x\n", sliport_status);
2236 dev_err(&adapter->pdev->dev,
2237 "ERR: sliport error1 0x%x\n", sliport_err1);
2238 dev_err(&adapter->pdev->dev,
2239 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2240 }
2241
e1cfb67a
PR
2242 if (ue_lo) {
2243 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2244 if (ue_lo & 1)
7c185276
AK
2245 dev_err(&adapter->pdev->dev,
2246 "UE: %s bit set\n", ue_status_low_desc[i]);
2247 }
2248 }
f67ef7ba 2249
e1cfb67a
PR
2250 if (ue_hi) {
2251 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2252 if (ue_hi & 1)
7c185276
AK
2253 dev_err(&adapter->pdev->dev,
2254 "UE: %s bit set\n", ue_status_hi_desc[i]);
2255 }
2256 }
2257
2258}
2259
8d56ff11
SP
2260static void be_msix_disable(struct be_adapter *adapter)
2261{
ac6a0c4a 2262 if (msix_enabled(adapter)) {
8d56ff11 2263 pci_disable_msix(adapter->pdev);
ac6a0c4a 2264 adapter->num_msix_vec = 0;
3abcdeda
SP
2265 }
2266}
2267
10ef9ab4
SP
2268static uint be_num_rss_want(struct be_adapter *adapter)
2269{
30e80b55 2270 u32 num = 0;
abb93951 2271
10ef9ab4 2272 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2273 (lancer_chip(adapter) ||
2274 (!sriov_want(adapter) && be_physfn(adapter)))) {
2275 num = adapter->max_rss_queues;
30e80b55
YM
2276 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2277 }
2278 return num;
10ef9ab4
SP
2279}
2280
6b7c5b94
SP
/* Negotiate and enable MSI-X vectors. The vector budget covers the RSS
 * rings (capped by online CPUs) plus, when RoCE is supported, a share for
 * RoCE EQs. If pci_enable_msix() offers fewer vectors than requested, a
 * second attempt is made with the offered count. On failure the adapter
 * silently falls back to INTx (num_msix_vec stays 0).
 */
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		/* A positive return is the number of vectors available;
		 * retry with exactly that many */
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	/* Split the granted vectors between NIC and RoCE */
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
2328
/* Return the MSI-X vector assigned to the given event-queue object. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}
6b7c5b94 2334
b628bde2
SP
/* Request an IRQ for each event queue's MSI-X vector. On failure, frees
 * the IRQs registered so far (walking backwards), disables MSI-X, and
 * returns the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}
2358
/* Register interrupt handlers: MSI-X when enabled, otherwise shared INTx
 * on the first EQ. VFs support only MSI-X, so an MSI-X failure on a VF is
 * returned without falling back to INTx.
 * Returns 0 on success or a negative error code.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			&adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2386
/* Undo be_irq_register(): free the INTx line or every MSI-X vector's IRQ,
 * then clear the registered flag. Safe to call when nothing is registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2409
/* Destroy all RX queues: invalidate each RXQ in FW, give in-flight DMA and
 * the flush completion time to land, drain the CQ, then free host memory.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2430
889cd4b2
SP
/* ndo_stop handler. Teardown ordering matters: quiesce RoCE, mask the
 * interrupt at the chip (non-Lancer), stop NAPI, stop async MCC events,
 * drain TX completions, destroy the RX queues, synchronize and drain each
 * EQ, and finally release the IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i)
		napi_disable(&eqo->napi);

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running for this vector */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2466
/* Allocate and create the RX rings, program the RSS indirection table when
 * multiple RX queues are in use, and post the initial RX buffers. The
 * indirection table is 128 entries, filled round-robin with the RSS ring
 * ids. Returns 0 on success or a negative error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill all 128 table slots, cycling through the RSS rings;
		 * the outer loop strides by the number of RSS rings */
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2513
6b7c5b94
SP
/* ndo_open handler: create the RX rings, register IRQs, unmask the chip
 * interrupt (non-Lancer), arm all CQs, enable async MCC events and NAPI,
 * report initial link state, and bring up RoCE.
 * Returns 0 on success; on failure tears down via be_close() and
 * returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2555
71d8d1b5
AK
2556static int be_setup_wol(struct be_adapter *adapter, bool enable)
2557{
2558 struct be_dma_mem cmd;
2559 int status = 0;
2560 u8 mac[ETH_ALEN];
2561
2562 memset(mac, 0, ETH_ALEN);
2563
2564 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2565 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2566 GFP_KERNEL);
71d8d1b5
AK
2567 if (cmd.va == NULL)
2568 return -1;
2569 memset(cmd.va, 0, cmd.size);
2570
2571 if (enable) {
2572 status = pci_write_config_dword(adapter->pdev,
2573 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2574 if (status) {
2575 dev_err(&adapter->pdev->dev,
2381a55c 2576 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2577 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2578 cmd.dma);
71d8d1b5
AK
2579 return status;
2580 }
2581 status = be_cmd_enable_magic_wol(adapter,
2582 adapter->netdev->dev_addr, &cmd);
2583 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2584 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2585 } else {
2586 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2587 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2588 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2589 }
2590
2b7bcebf 2591 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2592 return status;
2593}
2594
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Lancer programs VF MACs via the MAC list; BEx adds a pmac */
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
			"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE: only the status of the last VF is returned */
	return status;
}
2629
4c876616
SP
/* For already-enabled VFs, look up each VF's pmac_id and query the actual
 * MAC programmed in the ASIC, caching it in vf_cfg.
 * Returns 0 on success or the first query error.
 * NOTE(review): the return value of be_cmd_get_mac_from_list() and the
 * 'active' flag are not checked here — presumably pmac_id is all that is
 * needed before the query; confirm against the FW command semantics.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;
	bool active;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_cmd_get_mac_from_list(adapter, mac, &active,
					 &vf_cfg->pmac_id, 0);

		status = be_cmd_mac_addr_query(adapter, mac, false,
					       vf_cfg->if_handle, 0);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2649
/* Release all VF resources. If any VF is still assigned to a VM, SR-IOV
 * is left enabled (only the host-side config is freed); otherwise each
 * VF's MAC/interface is destroyed and SR-IOV is disabled.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}
2675
a54769f5
SP
/* Full teardown of adapter resources: stop the worker, clear VFs, delete
 * extra unicast MACs (pmac_id[0] is the primary and is not deleted here),
 * destroy the interface and all queue sets, free the pmac table, and
 * disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int i = 1;	/* pmac_id[0] is the primary MAC; start at 1 */

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}
2705
4c876616 2706static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2707{
4c876616
SP
2708 struct be_vf_cfg *vf_cfg;
2709 u32 cap_flags, en_flags, vf;
abb93951
PR
2710 int status;
2711
4c876616
SP
2712 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2713 BE_IF_FLAGS_MULTICAST;
abb93951 2714
4c876616
SP
2715 for_all_vfs(adapter, vf_cfg, vf) {
2716 if (!BE3_chip(adapter))
2717 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2718
2719 /* If a FW profile exists, then cap_flags are updated */
2720 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2721 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2722 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2723 &vf_cfg->if_handle, vf + 1);
2724 if (status)
2725 goto err;
2726 }
2727err:
2728 return status;
abb93951
PR
2729}
2730
39f1d94d 2731static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2732{
11ac75ed 2733 struct be_vf_cfg *vf_cfg;
30128031
SP
2734 int vf;
2735
39f1d94d
SP
2736 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2737 GFP_KERNEL);
2738 if (!adapter->vf_cfg)
2739 return -ENOMEM;
2740
11ac75ed
SP
2741 for_all_vfs(adapter, vf_cfg, vf) {
2742 vf_cfg->if_handle = -1;
2743 vf_cfg->pmac_id = -1;
30128031 2744 }
39f1d94d 2745 return 0;
30128031
SP
2746}
2747
f9449ab7
SP
/* Bring up SR-IOV VFs. If VFs are already enabled (e.g. after a reload),
 * reuse them and query their existing interfaces/MACs; otherwise enable
 * SR-IOV (capped at the device limit), create interfaces, and program MACs.
 * Per VF: optionally lift the BE3 default 100mbps TX-rate cap, cache link
 * speed, fetch the default vlan, and enable the VF.
 * Returns 0 on success; on error everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u16 def_vlan, lnk_speed;
	int status, old_vfs, vf;
	struct device *dev = &adapter->pdev->dev;

	old_vfs = be_find_vfs(adapter, ENABLED);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > adapter->dev_num_vfs)
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 adapter->dev_num_vfs, num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);

		/* SR-IOV enable failure is not fatal: run without VFs */
		status = pci_enable_sriov(adapter->pdev, num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			return 0;
		}
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Pre-existing VFs: just look up their if_handles */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_set_qos(adapter, 1000, vf+1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
					       vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
2827
30128031
SP
2828static void be_setup_init(struct be_adapter *adapter)
2829{
2830 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2831 adapter->phy.link_speed = -1;
30128031
SP
2832 adapter->if_handle = -1;
2833 adapter->be3_native = false;
2834 adapter->promiscuous = false;
f25b119c
PR
2835 if (be_physfn(adapter))
2836 adapter->cmd_privileges = MAX_PRIVILEGES;
2837 else
2838 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2839}
2840
1578e777
PR
2841static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2842 bool *active_mac, u32 *pmac_id)
590c391d 2843{
1578e777 2844 int status = 0;
e5e1ee89 2845
1578e777
PR
2846 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2847 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2848 if (!lancer_chip(adapter) && !be_physfn(adapter))
2849 *active_mac = true;
2850 else
2851 *active_mac = false;
e5e1ee89 2852
1578e777
PR
2853 return status;
2854 }
e5e1ee89 2855
1578e777
PR
2856 if (lancer_chip(adapter)) {
2857 status = be_cmd_get_mac_from_list(adapter, mac,
2858 active_mac, pmac_id, 0);
2859 if (*active_mac) {
5ee4979b
SP
2860 status = be_cmd_mac_addr_query(adapter, mac, false,
2861 if_handle, *pmac_id);
1578e777
PR
2862 }
2863 } else if (be_physfn(adapter)) {
2864 /* For BE3, for PF get permanent MAC */
5ee4979b 2865 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2866 *active_mac = false;
e5e1ee89 2867 } else {
1578e777 2868 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 2869 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2870 if_handle, 0);
2871 *active_mac = true;
e5e1ee89 2872 }
590c391d
PR
2873 return status;
2874}
2875
abb93951
PR
2876static void be_get_resources(struct be_adapter *adapter)
2877{
4c876616
SP
2878 u16 dev_num_vfs;
2879 int pos, status;
abb93951
PR
2880 bool profile_present = false;
2881
4c876616 2882 if (!BEx_chip(adapter)) {
abb93951 2883 status = be_cmd_get_func_config(adapter);
abb93951
PR
2884 if (!status)
2885 profile_present = true;
2886 }
2887
2888 if (profile_present) {
2889 /* Sanity fixes for Lancer */
2890 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2891 BE_UC_PMAC_COUNT);
2892 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2893 BE_NUM_VLANS_SUPPORTED);
2894 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2895 BE_MAX_MC);
2896 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2897 MAX_TX_QS);
2898 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2899 BE3_MAX_RSS_QS);
2900 adapter->max_event_queues = min_t(u16,
2901 adapter->max_event_queues,
2902 BE3_MAX_RSS_QS);
2903
2904 if (adapter->max_rss_queues &&
2905 adapter->max_rss_queues == adapter->max_rx_queues)
2906 adapter->max_rss_queues -= 1;
2907
2908 if (adapter->max_event_queues < adapter->max_rss_queues)
2909 adapter->max_rss_queues = adapter->max_event_queues;
2910
2911 } else {
2912 if (be_physfn(adapter))
2913 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2914 else
2915 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2916
2917 if (adapter->function_mode & FLEX10_MODE)
2918 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2919 else
2920 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2921
2922 adapter->max_mcast_mac = BE_MAX_MC;
2923 adapter->max_tx_queues = MAX_TX_QS;
2924 adapter->max_rss_queues = (adapter->be3_native) ?
2925 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2926 adapter->max_event_queues = BE3_MAX_RSS_QS;
2927
2928 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2929 BE_IF_FLAGS_BROADCAST |
2930 BE_IF_FLAGS_MULTICAST |
2931 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2932 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2933 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2934 BE_IF_FLAGS_PROMISCUOUS;
2935
2936 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2937 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2938 }
4c876616
SP
2939
2940 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2941 if (pos) {
2942 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2943 &dev_num_vfs);
2944 if (BE3_chip(adapter))
2945 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2946 adapter->dev_num_vfs = dev_num_vfs;
2947 }
abb93951
PR
2948}
2949
39f1d94d
SP
2950/* Routine to query per function resource limits */
2951static int be_get_config(struct be_adapter *adapter)
2952{
4c876616 2953 int status;
39f1d94d 2954
abb93951
PR
2955 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2956 &adapter->function_mode,
2957 &adapter->function_caps);
2958 if (status)
2959 goto err;
2960
2961 be_get_resources(adapter);
2962
2963 /* primary mac needs 1 pmac entry */
2964 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2965 sizeof(u32), GFP_KERNEL);
2966 if (!adapter->pmac_id) {
2967 status = -ENOMEM;
2968 goto err;
2969 }
2970
abb93951
PR
2971err:
2972 return status;
39f1d94d
SP
2973}
2974
5fb379ee
SP
2975static int be_setup(struct be_adapter *adapter)
2976{
39f1d94d 2977 struct device *dev = &adapter->pdev->dev;
abb93951 2978 u32 en_flags;
a54769f5 2979 u32 tx_fc, rx_fc;
10ef9ab4 2980 int status;
ba343c77 2981 u8 mac[ETH_ALEN];
1578e777 2982 bool active_mac;
ba343c77 2983
30128031 2984 be_setup_init(adapter);
6b7c5b94 2985
abb93951
PR
2986 if (!lancer_chip(adapter))
2987 be_cmd_req_native_mode(adapter);
39f1d94d 2988
abb93951
PR
2989 status = be_get_config(adapter);
2990 if (status)
2991 goto err;
73d540f2 2992
10ef9ab4
SP
2993 be_msix_enable(adapter);
2994
2995 status = be_evt_queues_create(adapter);
2996 if (status)
a54769f5 2997 goto err;
6b7c5b94 2998
10ef9ab4
SP
2999 status = be_tx_cqs_create(adapter);
3000 if (status)
3001 goto err;
3002
3003 status = be_rx_cqs_create(adapter);
3004 if (status)
a54769f5 3005 goto err;
6b7c5b94 3006
f9449ab7 3007 status = be_mcc_queues_create(adapter);
10ef9ab4 3008 if (status)
a54769f5 3009 goto err;
6b7c5b94 3010
f25b119c
PR
3011 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3012 /* In UMC mode FW does not return right privileges.
3013 * Override with correct privilege equivalent to PF.
3014 */
3015 if (be_is_mc(adapter))
3016 adapter->cmd_privileges = MAX_PRIVILEGES;
3017
f9449ab7
SP
3018 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3019 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3020
abb93951 3021 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3022 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3023
abb93951 3024 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3025
abb93951 3026 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3027 &adapter->if_handle, 0);
5fb379ee 3028 if (status != 0)
a54769f5 3029 goto err;
6b7c5b94 3030
1578e777
PR
3031 memset(mac, 0, ETH_ALEN);
3032 active_mac = false;
3033 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3034 &active_mac, &adapter->pmac_id[0]);
3035 if (status != 0)
3036 goto err;
3037
3038 if (!active_mac) {
3039 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3040 &adapter->pmac_id[0], 0);
3041 if (status != 0)
3042 goto err;
3043 }
3044
3045 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3046 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3047 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3048 }
0dffc83e 3049
10ef9ab4
SP
3050 status = be_tx_qs_create(adapter);
3051 if (status)
3052 goto err;
3053
04b71175 3054 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3055
1d1e9a46 3056 if (adapter->vlans_added)
10329df8 3057 be_vid_config(adapter);
7ab8b0b4 3058
a54769f5 3059 be_set_rx_mode(adapter->netdev);
5fb379ee 3060
ddc3f5cb 3061 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3062
ddc3f5cb
AK
3063 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3064 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3065 adapter->rx_fc);
2dc1deb6 3066
39f1d94d
SP
3067 if (be_physfn(adapter) && num_vfs) {
3068 if (adapter->dev_num_vfs)
3069 be_vf_setup(adapter);
3070 else
3071 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3072 }
3073
f25b119c
PR
3074 status = be_cmd_get_phy_info(adapter);
3075 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3076 adapter->phy.fc_autoneg = 1;
3077
191eb756
SP
3078 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3079 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3080 return 0;
a54769f5
SP
3081err:
3082 be_clear(adapter);
3083 return status;
3084}
6b7c5b94 3085
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll path used with interrupts disabled (netconsole etc.): kick every
 * event queue and schedule its NAPI context.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3101
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "

/* 32-byte flash-directory cookie.  The second half deliberately fills
 * all 16 bytes (no NUL terminator); it is only ever compared with
 * memcmp(..., sizeof(flash_cookie)) against the on-flash header.
 */
char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3104
fa9a6fed 3105static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3106 const u8 *p, u32 img_start, int image_size,
3107 int hdr_size)
fa9a6fed
SB
3108{
3109 u32 crc_offset;
3110 u8 flashed_crc[4];
3111 int status;
3f0d4560
AK
3112
3113 crc_offset = hdr_size + img_start + image_size - 4;
3114
fa9a6fed 3115 p += crc_offset;
3f0d4560
AK
3116
3117 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3118 (image_size - 4));
fa9a6fed
SB
3119 if (status) {
3120 dev_err(&adapter->pdev->dev,
3121 "could not get crc from flash, not flashing redboot\n");
3122 return false;
3123 }
3124
3125 /*update redboot only if crc does not match*/
3126 if (!memcmp(flashed_crc, p, 4))
3127 return false;
3128 else
3129 return true;
fa9a6fed
SB
3130}
3131
306f1348
SP
3132static bool phy_flashing_required(struct be_adapter *adapter)
3133{
42f11cf2
AK
3134 return (adapter->phy.phy_type == TN_8022 &&
3135 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3136}
3137
c165541e
PR
3138static bool is_comp_in_ufi(struct be_adapter *adapter,
3139 struct flash_section_info *fsec, int type)
3140{
3141 int i = 0, img_type = 0;
3142 struct flash_section_info_g2 *fsec_g2 = NULL;
3143
ca34fe38 3144 if (BE2_chip(adapter))
c165541e
PR
3145 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3146
3147 for (i = 0; i < MAX_FLASH_COMP; i++) {
3148 if (fsec_g2)
3149 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3150 else
3151 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3152
3153 if (img_type == type)
3154 return true;
3155 }
3156 return false;
3157
3158}
3159
3160struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3161 int header_size,
3162 const struct firmware *fw)
3163{
3164 struct flash_section_info *fsec = NULL;
3165 const u8 *p = fw->data;
3166
3167 p += header_size;
3168 while (p < (fw->data + fw->size)) {
3169 fsec = (struct flash_section_info *)p;
3170 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3171 return fsec;
3172 p += 32;
3173 }
3174 return NULL;
3175}
3176
773a2d7c
PR
3177static int be_flash(struct be_adapter *adapter, const u8 *img,
3178 struct be_dma_mem *flash_cmd, int optype, int img_size)
3179{
3180 u32 total_bytes = 0, flash_op, num_bytes = 0;
3181 int status = 0;
3182 struct be_cmd_write_flashrom *req = flash_cmd->va;
3183
3184 total_bytes = img_size;
3185 while (total_bytes) {
3186 num_bytes = min_t(u32, 32*1024, total_bytes);
3187
3188 total_bytes -= num_bytes;
3189
3190 if (!total_bytes) {
3191 if (optype == OPTYPE_PHY_FW)
3192 flash_op = FLASHROM_OPER_PHY_FLASH;
3193 else
3194 flash_op = FLASHROM_OPER_FLASH;
3195 } else {
3196 if (optype == OPTYPE_PHY_FW)
3197 flash_op = FLASHROM_OPER_PHY_SAVE;
3198 else
3199 flash_op = FLASHROM_OPER_SAVE;
3200 }
3201
be716446 3202 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3203 img += num_bytes;
3204 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3205 flash_op, num_bytes);
3206 if (status) {
3207 if (status == ILLEGAL_IOCTL_REQ &&
3208 optype == OPTYPE_PHY_FW)
3209 break;
3210 dev_err(&adapter->pdev->dev,
3211 "cmd to write to flash rom failed.\n");
3212 return status;
3213 }
3214 }
3215 return 0;
3216}
3217
ca34fe38
SP
3218/* For BE2 and BE3 */
3219static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3220 const struct firmware *fw,
3221 struct be_dma_mem *flash_cmd,
3222 int num_of_images)
3f0d4560 3223
84517482 3224{
3f0d4560 3225 int status = 0, i, filehdr_size = 0;
c165541e 3226 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3227 const u8 *p = fw->data;
215faf9c 3228 const struct flash_comp *pflashcomp;
773a2d7c 3229 int num_comp, redboot;
c165541e
PR
3230 struct flash_section_info *fsec = NULL;
3231
3232 struct flash_comp gen3_flash_types[] = {
3233 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3234 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3235 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3236 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3237 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3238 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3239 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3240 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3241 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3242 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3243 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3244 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3245 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3246 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3247 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3248 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3249 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3250 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3251 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3252 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3253 };
c165541e
PR
3254
3255 struct flash_comp gen2_flash_types[] = {
3256 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3257 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3258 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3259 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3260 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3261 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3262 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3263 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3264 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3265 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3266 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3267 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3268 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3269 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3270 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3271 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3272 };
3273
ca34fe38 3274 if (BE3_chip(adapter)) {
3f0d4560
AK
3275 pflashcomp = gen3_flash_types;
3276 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3277 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3278 } else {
3279 pflashcomp = gen2_flash_types;
3280 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3281 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3282 }
ca34fe38 3283
c165541e
PR
3284 /* Get flash section info*/
3285 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3286 if (!fsec) {
3287 dev_err(&adapter->pdev->dev,
3288 "Invalid Cookie. UFI corrupted ?\n");
3289 return -1;
3290 }
9fe96934 3291 for (i = 0; i < num_comp; i++) {
c165541e 3292 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3293 continue;
c165541e
PR
3294
3295 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3296 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3297 continue;
3298
773a2d7c
PR
3299 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3300 !phy_flashing_required(adapter))
306f1348 3301 continue;
c165541e 3302
773a2d7c
PR
3303 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3304 redboot = be_flash_redboot(adapter, fw->data,
3305 pflashcomp[i].offset, pflashcomp[i].size,
3306 filehdr_size + img_hdrs_size);
3307 if (!redboot)
3308 continue;
3309 }
c165541e 3310
3f0d4560 3311 p = fw->data;
c165541e 3312 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3313 if (p + pflashcomp[i].size > fw->data + fw->size)
3314 return -1;
773a2d7c
PR
3315
3316 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3317 pflashcomp[i].size);
3318 if (status) {
3319 dev_err(&adapter->pdev->dev,
3320 "Flashing section type %d failed.\n",
3321 pflashcomp[i].img_type);
3322 return status;
84517482 3323 }
84517482 3324 }
84517482
AK
3325 return 0;
3326}
3327
773a2d7c
PR
3328static int be_flash_skyhawk(struct be_adapter *adapter,
3329 const struct firmware *fw,
3330 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3331{
773a2d7c
PR
3332 int status = 0, i, filehdr_size = 0;
3333 int img_offset, img_size, img_optype, redboot;
3334 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3335 const u8 *p = fw->data;
3336 struct flash_section_info *fsec = NULL;
3337
3338 filehdr_size = sizeof(struct flash_file_hdr_g3);
3339 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3340 if (!fsec) {
3341 dev_err(&adapter->pdev->dev,
3342 "Invalid Cookie. UFI corrupted ?\n");
3343 return -1;
3344 }
3345
3346 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3347 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3348 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3349
3350 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3351 case IMAGE_FIRMWARE_iSCSI:
3352 img_optype = OPTYPE_ISCSI_ACTIVE;
3353 break;
3354 case IMAGE_BOOT_CODE:
3355 img_optype = OPTYPE_REDBOOT;
3356 break;
3357 case IMAGE_OPTION_ROM_ISCSI:
3358 img_optype = OPTYPE_BIOS;
3359 break;
3360 case IMAGE_OPTION_ROM_PXE:
3361 img_optype = OPTYPE_PXE_BIOS;
3362 break;
3363 case IMAGE_OPTION_ROM_FCoE:
3364 img_optype = OPTYPE_FCOE_BIOS;
3365 break;
3366 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3367 img_optype = OPTYPE_ISCSI_BACKUP;
3368 break;
3369 case IMAGE_NCSI:
3370 img_optype = OPTYPE_NCSI_FW;
3371 break;
3372 default:
3373 continue;
3374 }
3375
3376 if (img_optype == OPTYPE_REDBOOT) {
3377 redboot = be_flash_redboot(adapter, fw->data,
3378 img_offset, img_size,
3379 filehdr_size + img_hdrs_size);
3380 if (!redboot)
3381 continue;
3382 }
3383
3384 p = fw->data;
3385 p += filehdr_size + img_offset + img_hdrs_size;
3386 if (p + img_size > fw->data + fw->size)
3387 return -1;
3388
3389 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3390 if (status) {
3391 dev_err(&adapter->pdev->dev,
3392 "Flashing section type %d failed.\n",
3393 fsec->fsec_entry[i].type);
3394 return status;
3395 }
3396 }
3397 return 0;
3f0d4560
AK
3398}
3399
f67ef7ba
PR
3400static int lancer_wait_idle(struct be_adapter *adapter)
3401{
3402#define SLIPORT_IDLE_TIMEOUT 30
3403 u32 reg_val;
3404 int status = 0, i;
3405
3406 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3407 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3408 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3409 break;
3410
3411 ssleep(1);
3412 }
3413
3414 if (i == SLIPORT_IDLE_TIMEOUT)
3415 status = -1;
3416
3417 return status;
3418}
3419
3420static int lancer_fw_reset(struct be_adapter *adapter)
3421{
3422 int status = 0;
3423
3424 status = lancer_wait_idle(adapter);
3425 if (status)
3426 return status;
3427
3428 iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3429 PHYSDEV_CONTROL_OFFSET);
3430
3431 return status;
3432}
3433
485bf569
SN
3434static int lancer_fw_download(struct be_adapter *adapter,
3435 const struct firmware *fw)
84517482 3436{
485bf569
SN
3437#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3438#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3439 struct be_dma_mem flash_cmd;
485bf569
SN
3440 const u8 *data_ptr = NULL;
3441 u8 *dest_image_ptr = NULL;
3442 size_t image_size = 0;
3443 u32 chunk_size = 0;
3444 u32 data_written = 0;
3445 u32 offset = 0;
3446 int status = 0;
3447 u8 add_status = 0;
f67ef7ba 3448 u8 change_status;
84517482 3449
485bf569 3450 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3451 dev_err(&adapter->pdev->dev,
485bf569
SN
3452 "FW Image not properly aligned. "
3453 "Length must be 4 byte aligned.\n");
3454 status = -EINVAL;
3455 goto lancer_fw_exit;
d9efd2af
SB
3456 }
3457
485bf569
SN
3458 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3459 + LANCER_FW_DOWNLOAD_CHUNK;
3460 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3461 &flash_cmd.dma, GFP_KERNEL);
3462 if (!flash_cmd.va) {
3463 status = -ENOMEM;
3464 dev_err(&adapter->pdev->dev,
3465 "Memory allocation failure while flashing\n");
3466 goto lancer_fw_exit;
3467 }
84517482 3468
485bf569
SN
3469 dest_image_ptr = flash_cmd.va +
3470 sizeof(struct lancer_cmd_req_write_object);
3471 image_size = fw->size;
3472 data_ptr = fw->data;
3473
3474 while (image_size) {
3475 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3476
3477 /* Copy the image chunk content. */
3478 memcpy(dest_image_ptr, data_ptr, chunk_size);
3479
3480 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3481 chunk_size, offset,
3482 LANCER_FW_DOWNLOAD_LOCATION,
3483 &data_written, &change_status,
3484 &add_status);
485bf569
SN
3485 if (status)
3486 break;
3487
3488 offset += data_written;
3489 data_ptr += data_written;
3490 image_size -= data_written;
3491 }
3492
3493 if (!status) {
3494 /* Commit the FW written */
3495 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3496 0, offset,
3497 LANCER_FW_DOWNLOAD_LOCATION,
3498 &data_written, &change_status,
3499 &add_status);
485bf569
SN
3500 }
3501
3502 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3503 flash_cmd.dma);
3504 if (status) {
3505 dev_err(&adapter->pdev->dev,
3506 "Firmware load error. "
3507 "Status code: 0x%x Additional Status: 0x%x\n",
3508 status, add_status);
3509 goto lancer_fw_exit;
3510 }
3511
f67ef7ba
PR
3512 if (change_status == LANCER_FW_RESET_NEEDED) {
3513 status = lancer_fw_reset(adapter);
3514 if (status) {
3515 dev_err(&adapter->pdev->dev,
3516 "Adapter busy for FW reset.\n"
3517 "New FW will not be active.\n");
3518 goto lancer_fw_exit;
3519 }
3520 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3521 dev_err(&adapter->pdev->dev,
3522 "System reboot required for new FW"
3523 " to be active\n");
3524 }
3525
485bf569
SN
3526 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3527lancer_fw_exit:
3528 return status;
3529}
3530
ca34fe38
SP
3531#define UFI_TYPE2 2
3532#define UFI_TYPE3 3
3533#define UFI_TYPE4 4
3534static int be_get_ufi_type(struct be_adapter *adapter,
3535 struct flash_file_hdr_g2 *fhdr)
773a2d7c
PR
3536{
3537 if (fhdr == NULL)
3538 goto be_get_ufi_exit;
3539
ca34fe38
SP
3540 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3541 return UFI_TYPE4;
3542 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3543 return UFI_TYPE3;
3544 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3545 return UFI_TYPE2;
773a2d7c
PR
3546
3547be_get_ufi_exit:
3548 dev_err(&adapter->pdev->dev,
3549 "UFI and Interface are not compatible for flashing\n");
3550 return -1;
3551}
3552
485bf569
SN
3553static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3554{
3555 struct flash_file_hdr_g2 *fhdr;
3556 struct flash_file_hdr_g3 *fhdr3;
3557 struct image_hdr *img_hdr_ptr = NULL;
3558 struct be_dma_mem flash_cmd;
3559 const u8 *p;
773a2d7c 3560 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3561
be716446 3562 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3563 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3564 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3565 if (!flash_cmd.va) {
3566 status = -ENOMEM;
3567 dev_err(&adapter->pdev->dev,
3568 "Memory allocation failure while flashing\n");
485bf569 3569 goto be_fw_exit;
84517482
AK
3570 }
3571
773a2d7c
PR
3572 p = fw->data;
3573 fhdr = (struct flash_file_hdr_g2 *)p;
3574
ca34fe38 3575 ufi_type = be_get_ufi_type(adapter, fhdr);
773a2d7c
PR
3576
3577 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3578 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3579 for (i = 0; i < num_imgs; i++) {
3580 img_hdr_ptr = (struct image_hdr *)(fw->data +
3581 (sizeof(struct flash_file_hdr_g3) +
3582 i * sizeof(struct image_hdr)));
3583 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
ca34fe38 3584 if (ufi_type == UFI_TYPE4)
773a2d7c
PR
3585 status = be_flash_skyhawk(adapter, fw,
3586 &flash_cmd, num_imgs);
ca34fe38
SP
3587 else if (ufi_type == UFI_TYPE3)
3588 status = be_flash_BEx(adapter, fw, &flash_cmd,
3589 num_imgs);
3f0d4560 3590 }
773a2d7c
PR
3591 }
3592
ca34fe38
SP
3593 if (ufi_type == UFI_TYPE2)
3594 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3595 else if (ufi_type == -1)
3f0d4560 3596 status = -1;
84517482 3597
2b7bcebf
IV
3598 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3599 flash_cmd.dma);
84517482
AK
3600 if (status) {
3601 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3602 goto be_fw_exit;
84517482
AK
3603 }
3604
af901ca1 3605 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3606
485bf569
SN
3607be_fw_exit:
3608 return status;
3609}
3610
3611int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3612{
3613 const struct firmware *fw;
3614 int status;
3615
3616 if (!netif_running(adapter->netdev)) {
3617 dev_err(&adapter->pdev->dev,
3618 "Firmware load not allowed (interface is down)\n");
3619 return -1;
3620 }
3621
3622 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3623 if (status)
3624 goto fw_exit;
3625
3626 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3627
3628 if (lancer_chip(adapter))
3629 status = lancer_fw_download(adapter, fw);
3630 else
3631 status = be_fw_download(adapter, fw);
3632
84517482
AK
3633fw_exit:
3634 release_firmware(fw);
3635 return status;
3636}
3637
e5686ad8 3638static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3639 .ndo_open = be_open,
3640 .ndo_stop = be_close,
3641 .ndo_start_xmit = be_xmit,
a54769f5 3642 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3643 .ndo_set_mac_address = be_mac_addr_set,
3644 .ndo_change_mtu = be_change_mtu,
ab1594e9 3645 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3646 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3647 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3648 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3649 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3650 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3651 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3652 .ndo_get_vf_config = be_get_vf_config,
3653#ifdef CONFIG_NET_POLL_CONTROLLER
3654 .ndo_poll_controller = be_netpoll,
3655#endif
6b7c5b94
SP
3656};
3657
3658static void be_netdev_init(struct net_device *netdev)
3659{
3660 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3661 struct be_eq_obj *eqo;
3abcdeda 3662 int i;
6b7c5b94 3663
6332c8d3 3664 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68
MM
3665 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3666 NETIF_F_HW_VLAN_TX;
3667 if (be_multi_rxq(adapter))
3668 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3669
3670 netdev->features |= netdev->hw_features |
8b8ddc68 3671 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4b972914 3672
eb8a50d9 3673 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3674 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3675
fbc13f01
AK
3676 netdev->priv_flags |= IFF_UNICAST_FLT;
3677
6b7c5b94
SP
3678 netdev->flags |= IFF_MULTICAST;
3679
b7e5887e 3680 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3681
10ef9ab4 3682 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3683
3684 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3685
10ef9ab4
SP
3686 for_all_evt_queues(adapter, eqo, i)
3687 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3688}
3689
3690static void be_unmap_pci_bars(struct be_adapter *adapter)
3691{
c5b3ad4c
SP
3692 if (adapter->csr)
3693 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3694 if (adapter->db)
ce66f781 3695 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3696}
3697
ce66f781
SP
/* Doorbells live in BAR 0 on Lancer and on VFs, in BAR 4 otherwise */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
3705
3706static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3707{
dbf0f2a7 3708 if (skyhawk_chip(adapter)) {
ce66f781
SP
3709 adapter->roce_db.size = 4096;
3710 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3711 db_bar(adapter));
3712 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3713 db_bar(adapter));
3714 }
045508a8 3715 return 0;
6b7c5b94
SP
3716}
3717
3718static int be_map_pci_bars(struct be_adapter *adapter)
3719{
3720 u8 __iomem *addr;
ce66f781 3721 u32 sli_intf;
6b7c5b94 3722
ce66f781
SP
3723 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3724 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3725 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3726
c5b3ad4c
SP
3727 if (BEx_chip(adapter) && be_physfn(adapter)) {
3728 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3729 if (adapter->csr == NULL)
3730 return -ENOMEM;
3731 }
3732
ce66f781 3733 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3734 if (addr == NULL)
3735 goto pci_map_err;
ba343c77 3736 adapter->db = addr;
ce66f781
SP
3737
3738 be_roce_map_pci_bars(adapter);
6b7c5b94 3739 return 0;
ce66f781 3740
6b7c5b94
SP
3741pci_map_err:
3742 be_unmap_pci_bars(adapter);
3743 return -ENOMEM;
3744}
3745
6b7c5b94
SP
3746static void be_ctrl_cleanup(struct be_adapter *adapter)
3747{
8788fdc2 3748 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3749
3750 be_unmap_pci_bars(adapter);
3751
3752 if (mem->va)
2b7bcebf
IV
3753 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3754 mem->dma);
e7b909a6 3755
5b8821b7 3756 mem = &adapter->rx_filter;
e7b909a6 3757 if (mem->va)
2b7bcebf
IV
3758 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3759 mem->dma);
6b7c5b94
SP
3760}
3761
6b7c5b94
SP
/* Set up the control path used to talk to the adapter's firmware:
 * read the SLI interface register, map PCI BARs, allocate the
 * (16-byte-aligned) MCC mailbox and the RX-filter command buffer,
 * and initialize the locks protecting mailbox/MCC access.
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto chain.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Decode chip family and PF/VF function type from config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed (see be_ctrl_cleanup()).
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Saved state is restored in the EEH/resume paths */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3819
3820static void be_stats_cleanup(struct be_adapter *adapter)
3821{
3abcdeda 3822 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3823
3824 if (cmd->va)
2b7bcebf
IV
3825 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3826 cmd->va, cmd->dma);
6b7c5b94
SP
3827}
3828
3829static int be_stats_init(struct be_adapter *adapter)
3830{
3abcdeda 3831 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3832
ca34fe38
SP
3833 if (lancer_chip(adapter))
3834 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3835 else if (BE2_chip(adapter))
89a88ab8 3836 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3837 else
3838 /* BE3 and Skyhawk */
3839 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3840
2b7bcebf
IV
3841 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3842 GFP_KERNEL);
6b7c5b94
SP
3843 if (cmd->va == NULL)
3844 return -1;
d291b9af 3845 memset(cmd->va, 0, cmd->size);
6b7c5b94
SP
3846 return 0;
3847}
3848
/* PCI .remove callback: tear down the adapter in the reverse order of
 * be_probe(). RoCE and the recovery worker go first so nothing races
 * with the teardown; the netdev is unregistered before be_clear()
 * frees the queues it uses.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is NULL if probe failed before pci_set_drvdata() stuck */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3879
4762f6ce
AK
3880bool be_is_wol_supported(struct be_adapter *adapter)
3881{
3882 return ((adapter->wol_cap & BE_WOL_CAP) &&
3883 !be_is_wol_excluded(adapter)) ? true : false;
3884}
3885
941a77d5
SK
/* Query the firmware's extended-FAT capabilities and return the UART
 * debug/trace level configured in module[0]; returns 0 on Lancer chips
 * or on any failure (allocation or command error).
 * NOTE(review): uses legacy pci_alloc_consistent() (implies GFP_ATOMIC)
 * rather than dma_alloc_coherent(); consider converting — confirm
 * against the rest of the driver's DMA usage.
 */
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		/* FAT config params follow the generic response header */
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		/* last matching UART mode entry wins */
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}
abb93951 3922
/* Fetch one-time configuration from firmware during probe: controller
 * attributes, WOL capability (with a fallback when the query fails),
 * die-temperature polling frequency, and the initial msg_enable level
 * derived from the firmware log level. Returns 0 or a negative errno.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
3951
/* Recover a Lancer function after a SLIPORT error: wait for the chip
 * to report ready, tear the function down, clear the error flags and
 * rebuild it with be_setup()/be_open(). Returns 0 on success or the
 * first failing step's errno.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* reset error state before re-initializing the function */
	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	/* only log failure when an EEH error is pending; EEH handles it */
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}
3988
/* Periodic (1 s) worker that polls for adapter errors and, on Lancer
 * chips with a hardware error (but no pending EEH error), detaches the
 * netdev and attempts SLIPORT recovery. Always reschedules itself.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* EEH owns recovery when an EEH error is in progress */
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		/* stay detached on failure; retry on the next pass */
		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}
4016
/* Periodic (1 s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise kicks off a stats query, periodically
 * reads die temperature, replenishes starved RX rings and updates the
 * adaptive EQ delay for each event queue. Always reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* issue a new stats request only once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* be_get_temp_freq is a power of 2 (see be_get_initial_config) */
	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4059
39f1d94d
SP
4060static bool be_reset_required(struct be_adapter *adapter)
4061{
d79c0a20 4062 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
39f1d94d
SP
4063}
4064
d379142b
SP
4065static char *mc_name(struct be_adapter *adapter)
4066{
4067 if (adapter->function_mode & FLEX10_MODE)
4068 return "FLEX10";
4069 else if (adapter->function_mode & VNIC_MODE)
4070 return "vNIC";
4071 else if (adapter->function_mode & UMC_ENABLED)
4072 return "UMC";
4073 else
4074 return "";
4075}
4076
/* "PF" or "VF" depending on the PCI function type, for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4081
/* PCI .probe callback: bring up a BE/Lancer/Skyhawk NIC function.
 * Order matters throughout — PCI enable/regions, netdev allocation,
 * DMA mask, control path, firmware handshake, optional function reset,
 * stats buffer, initial config, queue setup, netdev registration —
 * and each failure unwinds exactly what succeeded via the goto chain.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: log the failure and continue */
	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	/* skip the reset when VFs are enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4202
/* Legacy PM .suspend callback: arm WOL if enabled, stop the recovery
 * worker, close and tear down the interface, then save state and put
 * the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4226
/* Legacy PM .resume callback: re-enable the device (power state D0),
 * redo the firmware handshake, rebuild queues with be_setup(), reopen
 * the interface if it was running, restart the recovery worker and
 * disarm WOL. Mirrors be_suspend() in reverse.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	/* 0 == PCI_D0 (full power) */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() return value is ignored here, unlike
	 * the probe/EEH paths — confirm whether that is intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}
4263
82456b03
SP
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI .shutdown callback: quiesce the adapter for reboot/kexec — stop
 * both workers, detach the netdev, issue a function reset (the FLR
 * noted above) and disable the PCI device.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4283
cf588477
SP
/* EEH .error_detected callback: mark the EEH error, stop the recovery
 * worker, detach/close the netdev and tear down the function. Returns
 * DISCONNECT on permanent failure, otherwise NEED_RESET (after waiting
 * out any in-progress firmware flash dump).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* makes the Lancer recovery worker stand down (see
	 * be_func_recovery_task)
	 */
	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4323
/* EEH .slot_reset callback: clear driver error state, re-enable the
 * device at full power, restore saved config space and wait for the
 * firmware to come ready. Returns RECOVERED on success, DISCONNECT if
 * the device or firmware does not come back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	/* 0 == PCI_D0 (full power) */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}
4350
/* EEH .resume callback: after a successful slot reset, redo the
 * firmware handshake, reset and rebuild the function, reopen the
 * interface if it was running and restart the recovery worker. On any
 * failure the device stays detached and an error is logged.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4387
/* PCI/EEH error-recovery callbacks wired into be_driver below. */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4393
6b7c5b94
SP
/* PCI driver descriptor: probe/remove, legacy PM, shutdown and EEH
 * error handling for all device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4404
4405static int __init be_init_module(void)
4406{
8e95a202
JP
4407 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4408 rx_frag_size != 2048) {
6b7c5b94
SP
4409 printk(KERN_WARNING DRV_NAME
4410 " : Module param rx_frag_size must be 2048/4096/8192."
4411 " Using 2048\n");
4412 rx_frag_size = 2048;
4413 }
6b7c5b94
SP
4414
4415 return pci_register_driver(&be_driver);
4416}
4417module_init(be_init_module);
4418
/* Module exit point: unregister the driver registered in
 * be_init_module().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);