/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

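/* Both module parameters are S_IRUGO (read-only once loaded), so they must
 * be set at module load time; e.g. (hypothetical values):
 *
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */
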
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

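/* Toggle the host-interrupt enable bit in the PCI-config-space MEMBAR
 * control register; does nothing if the bit already matches the requested
 * state or if an EEH error has been detected.
 */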
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

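/* Doorbell helpers: each queue is kicked through a 32-bit doorbell register
 * in BAR space. For the RX and TX queues, the wmb() guarantees the queue
 * entries are visible in memory before the doorbell write is posted.
 */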
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->dev_addr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
					port_stats->rx_address_mismatch_drops +
					port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
					pport_stats->rx_address_mismatch_drops +
					pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

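/* Accumulate a wrapping 16-bit HW counter into a 32-bit SW counter.
 * Worked example: with *acc == 0x0001FFF0 and a new HW reading of
 * val == 0x0005, the HW counter has wrapped (0x0005 < 0xFFF0), so the
 * result is hi(*acc) + val + 65536 == 0x00020005.
 */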
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			/* below erx HW counter can actually wrap around after
			 * 65535. Driver accumulates a 32-bit value
			 */
			accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
					(u16)erx->rx_drops_no_fragments[rxo->q.id]);
		}
	}
}

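/* ndo_get_stats64: totals are summed over all RX/TX queues. The
 * u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() pair re-reads a
 * queue's counters until an untorn snapshot is observed (needed on 32-bit
 * hosts, where 64-bit counter updates are not atomic).
 */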
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

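/* Program one fragment's DMA address and length into a TX WRB */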
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

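/* DMA-map the skb (linear head plus page frags) and post one WRB per
 * fragment after the header WRB. Returns the number of bytes queued, or 0
 * on a DMA mapping error, in which case all mappings made so far are
 * unwound via the dma_err path.
 */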
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

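/* Insert the VLAN tag into the packet data itself (rather than letting the
 * HW insert it) and clear skb->vlan_tci; used to work around HW issues with
 * VLAN tag insertion on certain packet types.
 */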
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, vlan_tag);
		if (unlikely(!skb))
			return skb;

		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

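/* Detect the kind of ipv6 packet that can stall the BE3 Tx pipeline when
 * HW VLAN tagging is used: next header is not TCP/UDP and the hdrlen byte
 * (2nd byte) of the first extension header is 0xff.
 */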
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) &&
	       be_ipv6_exthdr_check(skb);
}

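/* Main transmit entry point (ndo_start_xmit). Applies the HW workarounds
 * below (trimming padded IPv4 packets, manual VLAN insertion, and the BE3
 * ipv6 Tx-stall avoidance) before posting WRBs and ringing the TX doorbell.
 */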
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
			vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

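/* Adaptive EQ delay: once a second, recompute the RX packet rate and derive
 * a new event-queue interrupt delay (eqd) from it, clamped to
 * [min_eqd, max_eqd]; the value is programmed only when it changes.
 */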
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

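/* RX buffers are rx_frag_size fragments carved out of a larger "big page"
 * that is DMA-mapped once; the mapping is torn down only when the frag
 * marked last_page_user is consumed.
 */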
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

5be93b9a 1459/* Process the RX completion indicated by rxcp when GRO is enabled */
10ef9ab4
SP
1460void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1461 struct be_rx_compl_info *rxcp)
6b7c5b94 1462{
10ef9ab4 1463 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1464 struct be_rx_page_info *page_info;
5be93b9a 1465 struct sk_buff *skb = NULL;
3abcdeda 1466 struct be_queue_info *rxq = &rxo->q;
2e588f84
SP
1467 u16 remaining, curr_frag_len;
1468 u16 i, j;
3968fa1e 1469
10ef9ab4 1470 skb = napi_get_frags(napi);
5be93b9a 1471 if (!skb) {
10ef9ab4 1472 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1473 return;
1474 }
1475
2e588f84
SP
1476 remaining = rxcp->pkt_size;
1477 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
10ef9ab4 1478 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
6b7c5b94
SP
1479
1480 curr_frag_len = min(remaining, rx_frag_size);
1481
bd46cb6c
AK
1482 /* Coalesce all frags from the same physical page in one slot */
1483 if (i == 0 || page_info->page_offset == 0) {
1484 /* First frag or Fresh page */
1485 j++;
b061b39e 1486 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1487 skb_shinfo(skb)->frags[j].page_offset =
1488 page_info->page_offset;
9e903e08 1489 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1490 } else {
1491 put_page(page_info->page);
1492 }
9e903e08 1493 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1494 skb->truesize += rx_frag_size;
bd46cb6c 1495 remaining -= curr_frag_len;
2e588f84 1496 index_inc(&rxcp->rxq_idx, rxq->len);
6b7c5b94
SP
1497 memset(page_info, 0, sizeof(*page_info));
1498 }
bd46cb6c 1499 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1500
5be93b9a 1501 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1502 skb->len = rxcp->pkt_size;
1503 skb->data_len = rxcp->pkt_size;
5be93b9a 1504 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1505 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914
AK
1506 if (adapter->netdev->features & NETIF_F_RXHASH)
1507 skb->rxhash = rxcp->rss_hash;
5be93b9a 1508
343e43c0 1509 if (rxcp->vlanf)
4c5102f9
AK
1510 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1511
10ef9ab4 1512 napi_gro_frags(napi);
2e588f84
SP
1513}
1514
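/* Unpack a v1 RX completion into the HW-independent be_rx_compl_info
 * cookie; the v0 variant below differs only in the completion layout.
 */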
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

1579static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1580{
1581 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1582 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1583 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1584
2e588f84
SP
1585 /* For checking the valid bit it is Ok to use either definition as the
1586 * valid bit is at the same position in both v0 and v1 Rx compl */
1587 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1588 return NULL;
6b7c5b94 1589
2e588f84
SP
1590 rmb();
1591 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1592
2e588f84 1593 if (adapter->be3_native)
10ef9ab4 1594 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1595 else
10ef9ab4 1596 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1597
15d72184
SP
1598 if (rxcp->vlanf) {
1599 /* vlanf could be wrongly set in some cards.
1600 * Ignore it if vtm is not set */
752961a1 1601 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1602 rxcp->vlanf = 0;
6b7c5b94 1603
15d72184 1604 if (!lancer_chip(adapter))
3c709f8f 1605 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1606
939cf306 1607 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1608 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1609 rxcp->vlanf = 0;
1610 }
2e588f84
SP
1611
1612 /* As the compl has been parsed, reset it; we won't touch it again */
1613 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1614
3abcdeda 1615 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1616 return rxcp;
1617}
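/* Editor's sketch: be_rx_compl_get() above is one instance of the generic
 * completion-queue consumer protocol used throughout this driver. A
 * minimal outline of the pattern (illustrative only; entry_is_valid(),
 * process_entry() and mark_invalid() are hypothetical placeholders, not
 * driver functions):
 *
 *	while ((entry = queue_tail_node(cq)) && entry_is_valid(entry)) {
 *		rmb();			// read valid bit before the payload
 *		process_entry(entry);	// parse while the copy is stable
 *		mark_invalid(entry);	// reset so the slot can be reused
 *		queue_tail_inc(cq);	// advance the consumer index
 *	}
 *
 * The rmb() matters because HW DMA-writes the entry and sets the valid
 * bit last; without the barrier the CPU could observe a set valid bit
 * and still read stale payload words.
 */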
1618
1829b086 1619static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1620{
6b7c5b94 1621 u32 order = get_order(size);
1829b086 1622
6b7c5b94 1623 if (order > 0)
1829b086
ED
1624 gfp |= __GFP_COMP;
1625 return alloc_pages(gfp, order);
6b7c5b94
SP
1626}
1627
1628/*
1629 * Allocate a page, split it into fragments of size rx_frag_size and post as
1630 * receive buffers to BE
1631 */
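/* Worked example (assuming 4KB pages): with the default rx_frag_size of
 * 2048, get_order(2048) == 0, so big_page_size == PAGE_SIZE and each page
 * is split into two fragments. The first fragment consumes the reference
 * returned by alloc_pages(); every later fragment takes an extra
 * reference via get_page(), and the entry holding the final fragment is
 * marked last_page_user so each reference is dropped exactly once.
 */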
1829b086 1632static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1633{
3abcdeda 1634 struct be_adapter *adapter = rxo->adapter;
26d92f92 1635 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1636 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1637 struct page *pagep = NULL;
1638 struct be_eth_rx_d *rxd;
1639 u64 page_dmaaddr = 0, frag_dmaaddr;
1640 u32 posted, page_offset = 0;
1641
3abcdeda 1642 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1643 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1644 if (!pagep) {
1829b086 1645 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1646 if (unlikely(!pagep)) {
ac124ff9 1647 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1648 break;
1649 }
2b7bcebf
IV
1650 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1651 0, adapter->big_page_size,
1652 DMA_FROM_DEVICE);
6b7c5b94
SP
1653 page_info->page_offset = 0;
1654 } else {
1655 get_page(pagep);
1656 page_info->page_offset = page_offset + rx_frag_size;
1657 }
1658 page_offset = page_info->page_offset;
1659 page_info->page = pagep;
fac6da5b 1660 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1661 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1662
1663 rxd = queue_head_node(rxq);
1664 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1665 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1666
1667 /* Any space left in the current big page for another frag? */
1668 if ((page_offset + rx_frag_size + rx_frag_size) >
1669 adapter->big_page_size) {
1670 pagep = NULL;
1671 page_info->last_page_user = true;
1672 }
26d92f92
SP
1673
1674 prev_page_info = page_info;
1675 queue_head_inc(rxq);
10ef9ab4 1676 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1677 }
1678 if (pagep)
26d92f92 1679 prev_page_info->last_page_user = true;
6b7c5b94
SP
1680
1681 if (posted) {
6b7c5b94 1682 atomic_add(posted, &rxq->used);
8788fdc2 1683 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1684 } else if (atomic_read(&rxq->used) == 0) {
1685 /* Let be_worker replenish when memory is available */
3abcdeda 1686 rxo->rx_post_starved = true;
6b7c5b94 1687 }
6b7c5b94
SP
1688}
1689
5fb379ee 1690static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1691{
6b7c5b94
SP
1692 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1693
1694 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1695 return NULL;
1696
f3eb62d2 1697 rmb();
6b7c5b94
SP
1698 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1699
1700 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1701
1702 queue_tail_inc(tx_cq);
1703 return txcp;
1704}
1705
3c8def97
SP
1706static u16 be_tx_compl_process(struct be_adapter *adapter,
1707 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1708{
3c8def97 1709 struct be_queue_info *txq = &txo->q;
a73b796e 1710 struct be_eth_wrb *wrb;
3c8def97 1711 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1712 struct sk_buff *sent_skb;
ec43b1a6
SP
1713 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1714 bool unmap_skb_hdr = true;
6b7c5b94 1715
ec43b1a6 1716 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1717 BUG_ON(!sent_skb);
ec43b1a6
SP
1718 sent_skbs[txq->tail] = NULL;
1719
1720 /* skip header wrb */
a73b796e 1721 queue_tail_inc(txq);
6b7c5b94 1722
ec43b1a6 1723 do {
6b7c5b94 1724 cur_index = txq->tail;
a73b796e 1725 wrb = queue_tail_node(txq);
2b7bcebf
IV
1726 unmap_tx_frag(&adapter->pdev->dev, wrb,
1727 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1728 unmap_skb_hdr = false;
1729
6b7c5b94
SP
1730 num_wrbs++;
1731 queue_tail_inc(txq);
ec43b1a6 1732 } while (cur_index != last_index);
6b7c5b94 1733
6b7c5b94 1734 kfree_skb(sent_skb);
4d586b82 1735 return num_wrbs;
6b7c5b94
SP
1736}
1737
10ef9ab4
SP
1738/* Return the number of events in the event queue */
1739static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1740{
10ef9ab4
SP
1741 struct be_eq_entry *eqe;
1742 int num = 0;
859b1e4e 1743
10ef9ab4
SP
1744 do {
1745 eqe = queue_tail_node(&eqo->q);
1746 if (eqe->evt == 0)
1747 break;
859b1e4e 1748
10ef9ab4
SP
1749 rmb();
1750 eqe->evt = 0;
1751 num++;
1752 queue_tail_inc(&eqo->q);
1753 } while (true);
1754
1755 return num;
859b1e4e
SP
1756}
1757
10ef9ab4
SP
1758/* Leaves the EQ in disarmed state */
1759static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1760{
10ef9ab4 1761 int num = events_get(eqo);
859b1e4e 1762
10ef9ab4 1763 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1764}
1765
10ef9ab4 1766static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1767{
1768 struct be_rx_page_info *page_info;
3abcdeda
SP
1769 struct be_queue_info *rxq = &rxo->q;
1770 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1771 struct be_rx_compl_info *rxcp;
d23e946c
SP
1772 struct be_adapter *adapter = rxo->adapter;
1773 int flush_wait = 0;
6b7c5b94
SP
1774 u16 tail;
1775
d23e946c
SP
1776 /* Consume pending rx completions.
1777 * Wait for the flush completion (identified by zero num_rcvd)
1778 * to arrive. Notify the CQ even when there are no more CQ entries,
1779 * so that HW flushes partially coalesced CQ entries.
1780 * In Lancer, there is no need to wait for flush compl.
1781 */
1782 for (;;) {
1783 rxcp = be_rx_compl_get(rxo);
1784 if (rxcp == NULL) {
1785 if (lancer_chip(adapter))
1786 break;
1787
1788 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1789 dev_warn(&adapter->pdev->dev,
1790 "did not receive flush compl\n");
1791 break;
1792 }
1793 be_cq_notify(adapter, rx_cq->id, true, 0);
1794 mdelay(1);
1795 } else {
1796 be_rx_compl_discard(rxo, rxcp);
1797 be_cq_notify(adapter, rx_cq->id, true, 1);
1798 if (rxcp->num_rcvd == 0)
1799 break;
1800 }
6b7c5b94
SP
1801 }
1802
d23e946c
SP
1803 /* After cleanup, leave the CQ in unarmed state */
1804 be_cq_notify(adapter, rx_cq->id, false, 0);
1805
1806 /* Then free posted rx buffers that were not used */
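/* Editorial example of the tail arithmetic below: with len == 1024,
 * head == 5 and used == 3, tail == (5 + 1024 - 3) % 1024 == 2, i.e. the
 * three posted-but-unreaped buffers sit at indices 2, 3 and 4.
 */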
6b7c5b94 1807 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1808 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1809 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1810 put_page(page_info->page);
1811 memset(page_info, 0, sizeof(*page_info));
1812 }
1813 BUG_ON(atomic_read(&rxq->used));
482c9e79 1814 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1815}
1816
0ae57bb3 1817static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1818{
0ae57bb3
SP
1819 struct be_tx_obj *txo;
1820 struct be_queue_info *txq;
a8e9179a 1821 struct be_eth_tx_compl *txcp;
4d586b82 1822 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1823 struct sk_buff *sent_skb;
1824 bool dummy_wrb;
0ae57bb3 1825 int i, pending_txqs;
a8e9179a
SP
1826
1827 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1828 do {
0ae57bb3
SP
1829 pending_txqs = adapter->num_tx_qs;
1830
1831 for_all_tx_queues(adapter, txo, i) {
1832 txq = &txo->q;
1833 while ((txcp = be_tx_compl_get(&txo->cq))) {
1834 end_idx =
1835 AMAP_GET_BITS(struct amap_eth_tx_compl,
1836 wrb_index, txcp);
1837 num_wrbs += be_tx_compl_process(adapter, txo,
1838 end_idx);
1839 cmpl++;
1840 }
1841 if (cmpl) {
1842 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1843 atomic_sub(num_wrbs, &txq->used);
1844 cmpl = 0;
1845 num_wrbs = 0;
1846 }
1847 if (atomic_read(&txq->used) == 0)
1848 pending_txqs--;
a8e9179a
SP
1849 }
1850
0ae57bb3 1851 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1852 break;
1853
1854 mdelay(1);
1855 } while (true);
1856
0ae57bb3
SP
1857 for_all_tx_queues(adapter, txo, i) {
1858 txq = &txo->q;
1859 if (atomic_read(&txq->used))
1860 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1861 atomic_read(&txq->used));
1862
1863 /* free posted tx for which compls will never arrive */
1864 while (atomic_read(&txq->used)) {
1865 sent_skb = txo->sent_skb_list[txq->tail];
1866 end_idx = txq->tail;
1867 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1868 &dummy_wrb);
1869 index_adv(&end_idx, num_wrbs - 1, txq->len);
1870 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1871 atomic_sub(num_wrbs, &txq->used);
1872 }
b03388d6 1873 }
6b7c5b94
SP
1874}
1875
10ef9ab4
SP
1876static void be_evt_queues_destroy(struct be_adapter *adapter)
1877{
1878 struct be_eq_obj *eqo;
1879 int i;
1880
1881 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1882 if (eqo->q.created) {
1883 be_eq_clean(eqo);
10ef9ab4 1884 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1885 }
10ef9ab4
SP
1886 be_queue_free(adapter, &eqo->q);
1887 }
1888}
1889
1890static int be_evt_queues_create(struct be_adapter *adapter)
1891{
1892 struct be_queue_info *eq;
1893 struct be_eq_obj *eqo;
1894 int i, rc;
1895
1896 adapter->num_evt_qs = num_irqs(adapter);
1897
1898 for_all_evt_queues(adapter, eqo, i) {
1899 eqo->adapter = adapter;
1900 eqo->tx_budget = BE_TX_BUDGET;
1901 eqo->idx = i;
1902 eqo->max_eqd = BE_MAX_EQD;
1903 eqo->enable_aic = true;
1904
1905 eq = &eqo->q;
1906 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1907 sizeof(struct be_eq_entry));
1908 if (rc)
1909 return rc;
1910
1911 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1912 if (rc)
1913 return rc;
1914 }
1cfafab9 1915 return 0;
10ef9ab4
SP
1916}
1917
5fb379ee
SP
1918static void be_mcc_queues_destroy(struct be_adapter *adapter)
1919{
1920 struct be_queue_info *q;
5fb379ee 1921
8788fdc2 1922 q = &adapter->mcc_obj.q;
5fb379ee 1923 if (q->created)
8788fdc2 1924 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1925 be_queue_free(adapter, q);
1926
8788fdc2 1927 q = &adapter->mcc_obj.cq;
5fb379ee 1928 if (q->created)
8788fdc2 1929 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1930 be_queue_free(adapter, q);
1931}
1932
1933/* Must be called only after TX qs are created as MCC shares TX EQ */
1934static int be_mcc_queues_create(struct be_adapter *adapter)
1935{
1936 struct be_queue_info *q, *cq;
5fb379ee 1937
8788fdc2 1938 cq = &adapter->mcc_obj.cq;
5fb379ee 1939 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1940 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1941 goto err;
1942
10ef9ab4
SP
1943 /* Use the default EQ for MCC completions */
1944 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1945 goto mcc_cq_free;
1946
8788fdc2 1947 q = &adapter->mcc_obj.q;
5fb379ee
SP
1948 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1949 goto mcc_cq_destroy;
1950
8788fdc2 1951 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1952 goto mcc_q_free;
1953
1954 return 0;
1955
1956mcc_q_free:
1957 be_queue_free(adapter, q);
1958mcc_cq_destroy:
8788fdc2 1959 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1960mcc_cq_free:
1961 be_queue_free(adapter, cq);
1962err:
1963 return -1;
1964}
1965
6b7c5b94
SP
1966static void be_tx_queues_destroy(struct be_adapter *adapter)
1967{
1968 struct be_queue_info *q;
3c8def97
SP
1969 struct be_tx_obj *txo;
1970 u8 i;
6b7c5b94 1971
3c8def97
SP
1972 for_all_tx_queues(adapter, txo, i) {
1973 q = &txo->q;
1974 if (q->created)
1975 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1976 be_queue_free(adapter, q);
6b7c5b94 1977
3c8def97
SP
1978 q = &txo->cq;
1979 if (q->created)
1980 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1981 be_queue_free(adapter, q);
1982 }
6b7c5b94
SP
1983}
1984
dafc0fe3
SP
1985static int be_num_txqs_want(struct be_adapter *adapter)
1986{
abb93951
PR
1987 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1988 be_is_mc(adapter) ||
1989 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 1990 BE2_chip(adapter))
dafc0fe3
SP
1991 return 1;
1992 else
abb93951 1993 return adapter->max_tx_queues;
dafc0fe3
SP
1994}
1995
10ef9ab4 1996static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 1997{
10ef9ab4
SP
1998 struct be_queue_info *cq, *eq;
1999 int status;
3c8def97
SP
2000 struct be_tx_obj *txo;
2001 u8 i;
6b7c5b94 2002
dafc0fe3 2003 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
2004 if (adapter->num_tx_qs != MAX_TX_QS) {
2005 rtnl_lock();
dafc0fe3
SP
2006 netif_set_real_num_tx_queues(adapter->netdev,
2007 adapter->num_tx_qs);
3bb62f4f
PR
2008 rtnl_unlock();
2009 }
dafc0fe3 2010
10ef9ab4
SP
2011 for_all_tx_queues(adapter, txo, i) {
2012 cq = &txo->cq;
2013 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2014 sizeof(struct be_eth_tx_compl));
2015 if (status)
2016 return status;
3c8def97 2017
10ef9ab4
SP
2018 /* If num_evt_qs is less than num_tx_qs, then more than
2019 * one txq shares an eq
2020 */
2021 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2022 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2023 if (status)
2024 return status;
2025 }
2026 return 0;
2027}
6b7c5b94 2028
10ef9ab4
SP
2029static int be_tx_qs_create(struct be_adapter *adapter)
2030{
2031 struct be_tx_obj *txo;
2032 int i, status;
fe6d2a38 2033
3c8def97 2034 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
2035 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2036 sizeof(struct be_eth_wrb));
2037 if (status)
2038 return status;
6b7c5b94 2039
10ef9ab4
SP
2040 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2041 if (status)
2042 return status;
3c8def97 2043 }
6b7c5b94 2044
d379142b
SP
2045 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2046 adapter->num_tx_qs);
10ef9ab4 2047 return 0;
6b7c5b94
SP
2048}
2049
10ef9ab4 2050static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2051{
2052 struct be_queue_info *q;
3abcdeda
SP
2053 struct be_rx_obj *rxo;
2054 int i;
2055
2056 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2057 q = &rxo->cq;
2058 if (q->created)
2059 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2060 be_queue_free(adapter, q);
ac6a0c4a
SP
2061 }
2062}
2063
10ef9ab4 2064static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2065{
10ef9ab4 2066 struct be_queue_info *eq, *cq;
3abcdeda
SP
2067 struct be_rx_obj *rxo;
2068 int rc, i;
6b7c5b94 2069
10ef9ab4
SP
2070 /* We'll create as many RSS rings as there are irqs.
2071 * But when there's only one irq there's no use creating RSS rings
2072 */
2073 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2074 num_irqs(adapter) + 1 : 1;
7f640062
SP
2075 if (adapter->num_rx_qs != MAX_RX_QS) {
2076 rtnl_lock();
2077 netif_set_real_num_rx_queues(adapter->netdev,
2078 adapter->num_rx_qs);
2079 rtnl_unlock();
2080 }
ac6a0c4a 2081
6b7c5b94 2082 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2083 for_all_rx_queues(adapter, rxo, i) {
2084 rxo->adapter = adapter;
3abcdeda
SP
2085 cq = &rxo->cq;
2086 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2087 sizeof(struct be_eth_rx_compl));
2088 if (rc)
10ef9ab4 2089 return rc;
3abcdeda 2090
10ef9ab4
SP
2091 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2092 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2093 if (rc)
10ef9ab4 2094 return rc;
3abcdeda 2095 }
6b7c5b94 2096
d379142b
SP
2097 dev_info(&adapter->pdev->dev,
2098 "created %d RSS queue(s) and 1 default RX queue\n",
2099 adapter->num_rx_qs - 1);
10ef9ab4 2100 return 0;
b628bde2
SP
2101}
2102
6b7c5b94
SP
2103static irqreturn_t be_intx(int irq, void *dev)
2104{
e49cc34f
SP
2105 struct be_eq_obj *eqo = dev;
2106 struct be_adapter *adapter = eqo->adapter;
2107 int num_evts = 0;
6b7c5b94 2108
d0b9cec3
SP
2109 /* IRQ is not expected when NAPI is scheduled as the EQ
2110 * will not be armed.
2111 * But, this can happen on Lancer INTx where it takes
2112 * a while to de-assert INTx or in BE2 where occasionally
2113 * an interrupt may be raised even when EQ is unarmed.
2114 * If NAPI is already scheduled, then counting & notifying
2115 * events will orphan them.
e49cc34f 2116 */
d0b9cec3 2117 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2118 num_evts = events_get(eqo);
d0b9cec3
SP
2119 __napi_schedule(&eqo->napi);
2120 if (num_evts)
2121 eqo->spurious_intr = 0;
2122 }
2123 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2124
d0b9cec3
SP
2125 /* Return IRQ_HANDLED only for the first spurious intr
2126 * after a valid intr to stop the kernel from branding
2127 * this irq as a bad one!
e49cc34f 2128 */
d0b9cec3
SP
2129 if (num_evts || eqo->spurious_intr++ == 0)
2130 return IRQ_HANDLED;
2131 else
2132 return IRQ_NONE;
6b7c5b94
SP
2133}
2134
10ef9ab4 2135static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2136{
10ef9ab4 2137 struct be_eq_obj *eqo = dev;
6b7c5b94 2138
0b545a62
SP
2139 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2140 napi_schedule(&eqo->napi);
6b7c5b94
SP
2141 return IRQ_HANDLED;
2142}
2143
2e588f84 2144static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2145{
2e588f84 2146 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2147}
2148
10ef9ab4
SP
2149static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2150 int budget)
6b7c5b94 2151{
3abcdeda
SP
2152 struct be_adapter *adapter = rxo->adapter;
2153 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2154 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2155 u32 work_done;
2156
2157 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2158 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2159 if (!rxcp)
2160 break;
2161
12004ae9
SP
2162 /* Is it a flush compl that has no data? */
2163 if (unlikely(rxcp->num_rcvd == 0))
2164 goto loop_continue;
2165
2166 /* Discard compl with partial DMA Lancer B0 */
2167 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2168 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2169 goto loop_continue;
2170 }
2171
2172 /* On BE drop pkts that arrive due to imperfect filtering in
2173 * promiscuous mode on some SKUs
2174 */
2175 if (unlikely(rxcp->port != adapter->port_num &&
2176 !lancer_chip(adapter))) {
10ef9ab4 2177 be_rx_compl_discard(rxo, rxcp);
12004ae9 2178 goto loop_continue;
64642811 2179 }
009dd872 2180
12004ae9 2181 if (do_gro(rxcp))
10ef9ab4 2182 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2183 else
10ef9ab4 2184 be_rx_compl_process(rxo, rxcp);
12004ae9 2185loop_continue:
2e588f84 2186 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2187 }
2188
10ef9ab4
SP
2189 if (work_done) {
2190 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2191
10ef9ab4
SP
2192 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2193 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2194 }
10ef9ab4 2195
6b7c5b94
SP
2196 return work_done;
2197}
2198
10ef9ab4
SP
2199static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2200 int budget, int idx)
6b7c5b94 2201{
6b7c5b94 2202 struct be_eth_tx_compl *txcp;
10ef9ab4 2203 int num_wrbs = 0, work_done;
3c8def97 2204
10ef9ab4
SP
2205 for (work_done = 0; work_done < budget; work_done++) {
2206 txcp = be_tx_compl_get(&txo->cq);
2207 if (!txcp)
2208 break;
2209 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2210 AMAP_GET_BITS(struct amap_eth_tx_compl,
2211 wrb_index, txcp));
10ef9ab4 2212 }
6b7c5b94 2213
10ef9ab4
SP
2214 if (work_done) {
2215 be_cq_notify(adapter, txo->cq.id, true, work_done);
2216 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2217
10ef9ab4
SP
2218 /* As Tx wrbs have been freed up, wake up netdev queue
2219 * if it was stopped due to lack of tx wrbs. */
2220 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2221 atomic_read(&txo->q.used) < txo->q.len / 2) {
2222 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2223 }
10ef9ab4
SP
2224
2225 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2226 tx_stats(txo)->tx_compl += work_done;
2227 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2228 }
10ef9ab4
SP
2229 return (work_done < budget); /* Done */
2230}
6b7c5b94 2231
10ef9ab4
SP
2232int be_poll(struct napi_struct *napi, int budget)
2233{
2234 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2235 struct be_adapter *adapter = eqo->adapter;
0b545a62 2236 int max_work = 0, work, i, num_evts;
10ef9ab4 2237 bool tx_done;
f31e50a8 2238
0b545a62
SP
2239 num_evts = events_get(eqo);
2240
10ef9ab4
SP
2241 /* Process all TXQs serviced by this EQ */
2242 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2243 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2244 eqo->tx_budget, i);
2245 if (!tx_done)
2246 max_work = budget;
f31e50a8
SP
2247 }
2248
10ef9ab4
SP
2249 /* This loop will iterate twice for EQ0 in which
2250 * completions of the last RXQ (default one) are also processed.
2251 * For other EQs the loop iterates only once.
2252 */
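/* Illustration of the striding used here: queues are assigned to EQs
 * round-robin, so with num_evt_qs == 4 and num_rx_qs == 5, EQ0 (idx 0)
 * services rx_obj[0] and rx_obj[4] while EQ1..EQ3 service one RXQ each.
 */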
2253 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2254 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2255 max_work = max(work, max_work);
2256 }
6b7c5b94 2257
10ef9ab4
SP
2258 if (is_mcc_eqo(eqo))
2259 be_process_mcc(adapter);
93c86700 2260
10ef9ab4
SP
2261 if (max_work < budget) {
2262 napi_complete(napi);
0b545a62 2263 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2264 } else {
2265 /* As we'll continue in polling mode, count and clear events */
0b545a62 2266 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2267 }
10ef9ab4 2268 return max_work;
6b7c5b94
SP
2269}
2270
f67ef7ba 2271void be_detect_error(struct be_adapter *adapter)
7c185276 2272{
e1cfb67a
PR
2273 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2274 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2275 u32 i;
2276
d23e946c 2277 if (be_hw_error(adapter))
72f02485
SP
2278 return;
2279
e1cfb67a
PR
2280 if (lancer_chip(adapter)) {
2281 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2282 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2283 sliport_err1 = ioread32(adapter->db +
2284 SLIPORT_ERROR1_OFFSET);
2285 sliport_err2 = ioread32(adapter->db +
2286 SLIPORT_ERROR2_OFFSET);
2287 }
2288 } else {
2289 pci_read_config_dword(adapter->pdev,
2290 PCICFG_UE_STATUS_LOW, &ue_lo);
2291 pci_read_config_dword(adapter->pdev,
2292 PCICFG_UE_STATUS_HIGH, &ue_hi);
2293 pci_read_config_dword(adapter->pdev,
2294 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2295 pci_read_config_dword(adapter->pdev,
2296 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2297
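/* A bit set in a UE mask register marks the corresponding status bit as
 * expected/ignorable on this platform; the masking below ensures only
 * unmasked bits are reported.
 */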
f67ef7ba
PR
2298 ue_lo = (ue_lo & ~ue_lo_mask);
2299 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2300 }
7c185276 2301
1451ae6e
AK
2302 /* On certain platforms BE hardware can indicate spurious UEs.
2303 * In case of a real UE the h/w is allowed to stop working completely,
2304 * so hw_error is not set on UE detection (only on SLIPORT errors).
2305 */
2306 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2307 adapter->hw_error = true;
434b3648 2308 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2309 "Error detected in the card\n");
2310 }
2311
2312 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313 dev_err(&adapter->pdev->dev,
2314 "ERR: sliport status 0x%x\n", sliport_status);
2315 dev_err(&adapter->pdev->dev,
2316 "ERR: sliport error1 0x%x\n", sliport_err1);
2317 dev_err(&adapter->pdev->dev,
2318 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2319 }
2320
e1cfb67a
PR
2321 if (ue_lo) {
2322 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2323 if (ue_lo & 1)
7c185276
AK
2324 dev_err(&adapter->pdev->dev,
2325 "UE: %s bit set\n", ue_status_low_desc[i]);
2326 }
2327 }
f67ef7ba 2328
e1cfb67a
PR
2329 if (ue_hi) {
2330 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2331 if (ue_hi & 1)
7c185276
AK
2332 dev_err(&adapter->pdev->dev,
2333 "UE: %s bit set\n", ue_status_hi_desc[i]);
2334 }
2335 }
2336
2337}
2338
8d56ff11
SP
2339static void be_msix_disable(struct be_adapter *adapter)
2340{
ac6a0c4a 2341 if (msix_enabled(adapter)) {
8d56ff11 2342 pci_disable_msix(adapter->pdev);
ac6a0c4a 2343 adapter->num_msix_vec = 0;
3abcdeda
SP
2344 }
2345}
2346
10ef9ab4
SP
2347static uint be_num_rss_want(struct be_adapter *adapter)
2348{
30e80b55 2349 u32 num = 0;
abb93951 2350
10ef9ab4 2351 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2352 (lancer_chip(adapter) ||
2353 (!sriov_want(adapter) && be_physfn(adapter)))) {
2354 num = adapter->max_rss_queues;
30e80b55
YM
2355 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2356 }
2357 return num;
10ef9ab4
SP
2358}
2359
6b7c5b94
SP
2360static void be_msix_enable(struct be_adapter *adapter)
2361{
10ef9ab4 2362#define BE_MIN_MSIX_VECTORS 1
045508a8 2363 int i, status, num_vec, num_roce_vec = 0;
d379142b 2364 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2365
10ef9ab4
SP
2366 /* If RSS queues are not used, need a vec for default RX Q */
2367 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2368 if (be_roce_supported(adapter)) {
2369 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2370 (num_online_cpus() + 1));
2371 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2372 num_vec += num_roce_vec;
2373 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2374 }
10ef9ab4 2375 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2376
ac6a0c4a 2377 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2378 adapter->msix_entries[i].entry = i;
2379
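/* With the legacy pci_enable_msix() API used here, 0 means success and a
 * positive return is the number of vectors actually available; the code
 * below retries once with that smaller count before giving up.
 */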
ac6a0c4a 2380 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2381 if (status == 0) {
2382 goto done;
2383 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2384 num_vec = status;
3abcdeda 2385 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
ac6a0c4a 2386 num_vec) == 0)
3abcdeda 2387 goto done;
3abcdeda 2388 }
d379142b
SP
2389
2390 dev_warn(dev, "MSIx enable failed\n");
3abcdeda
SP
2391 return;
2392done:
045508a8
PP
2393 if (be_roce_supported(adapter)) {
2394 if (num_vec > num_roce_vec) {
2395 adapter->num_msix_vec = num_vec - num_roce_vec;
2396 adapter->num_msix_roce_vec =
2397 num_vec - adapter->num_msix_vec;
2398 } else {
2399 adapter->num_msix_vec = num_vec;
2400 adapter->num_msix_roce_vec = 0;
2401 }
2402 } else
2403 adapter->num_msix_vec = num_vec;
d379142b 2404 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
ac6a0c4a 2405 return;
6b7c5b94
SP
2406}
2407
fe6d2a38 2408static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2409 struct be_eq_obj *eqo)
b628bde2 2410{
10ef9ab4 2411 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2412}
6b7c5b94 2413
b628bde2
SP
2414static int be_msix_register(struct be_adapter *adapter)
2415{
10ef9ab4
SP
2416 struct net_device *netdev = adapter->netdev;
2417 struct be_eq_obj *eqo;
2418 int status, i, vec;
6b7c5b94 2419
10ef9ab4
SP
2420 for_all_evt_queues(adapter, eqo, i) {
2421 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2422 vec = be_msix_vec_get(adapter, eqo);
2423 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2424 if (status)
2425 goto err_msix;
2426 }
b628bde2 2427
6b7c5b94 2428 return 0;
3abcdeda 2429err_msix:
10ef9ab4
SP
2430 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2431 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2432 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2433 status);
ac6a0c4a 2434 be_msix_disable(adapter);
6b7c5b94
SP
2435 return status;
2436}
2437
2438static int be_irq_register(struct be_adapter *adapter)
2439{
2440 struct net_device *netdev = adapter->netdev;
2441 int status;
2442
ac6a0c4a 2443 if (msix_enabled(adapter)) {
6b7c5b94
SP
2444 status = be_msix_register(adapter);
2445 if (status == 0)
2446 goto done;
ba343c77
SB
2447 /* INTx is not supported for VF */
2448 if (!be_physfn(adapter))
2449 return status;
6b7c5b94
SP
2450 }
2451
e49cc34f 2452 /* INTx: only the first EQ is used */
6b7c5b94
SP
2453 netdev->irq = adapter->pdev->irq;
2454 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2455 &adapter->eq_obj[0]);
6b7c5b94
SP
2456 if (status) {
2457 dev_err(&adapter->pdev->dev,
2458 "INTx request IRQ failed - err %d\n", status);
2459 return status;
2460 }
2461done:
2462 adapter->isr_registered = true;
2463 return 0;
2464}
2465
2466static void be_irq_unregister(struct be_adapter *adapter)
2467{
2468 struct net_device *netdev = adapter->netdev;
10ef9ab4 2469 struct be_eq_obj *eqo;
3abcdeda 2470 int i;
6b7c5b94
SP
2471
2472 if (!adapter->isr_registered)
2473 return;
2474
2475 /* INTx */
ac6a0c4a 2476 if (!msix_enabled(adapter)) {
e49cc34f 2477 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2478 goto done;
2479 }
2480
2481 /* MSIx */
10ef9ab4
SP
2482 for_all_evt_queues(adapter, eqo, i)
2483 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2484
6b7c5b94
SP
2485done:
2486 adapter->isr_registered = false;
6b7c5b94
SP
2487}
2488
10ef9ab4 2489static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2490{
2491 struct be_queue_info *q;
2492 struct be_rx_obj *rxo;
2493 int i;
2494
2495 for_all_rx_queues(adapter, rxo, i) {
2496 q = &rxo->q;
2497 if (q->created) {
2498 be_cmd_rxq_destroy(adapter, q);
2499 /* After the rxq is invalidated, wait for a grace time
2500 * of 1ms for all dma to end and the flush compl to
2501 * arrive
2502 */
2503 mdelay(1);
10ef9ab4 2504 be_rx_cq_clean(rxo);
482c9e79 2505 }
10ef9ab4 2506 be_queue_free(adapter, q);
482c9e79
SP
2507 }
2508}
2509
889cd4b2
SP
2510static int be_close(struct net_device *netdev)
2511{
2512 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2513 struct be_eq_obj *eqo;
2514 int i;
889cd4b2 2515
045508a8
PP
2516 be_roce_dev_close(adapter);
2517
fe6d2a38
SP
2518 if (!lancer_chip(adapter))
2519 be_intr_set(adapter, false);
889cd4b2 2520
a323d9bf 2521 for_all_evt_queues(adapter, eqo, i)
10ef9ab4 2522 napi_disable(&eqo->napi);
a323d9bf
SP
2523
2524 be_async_mcc_disable(adapter);
2525
2526 /* Wait for all pending tx completions to arrive so that
2527 * all tx skbs are freed.
2528 */
2529 be_tx_compl_clean(adapter);
2530
2531 be_rx_qs_destroy(adapter);
2532
2533 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2534 if (msix_enabled(adapter))
2535 synchronize_irq(be_msix_vec_get(adapter, eqo));
2536 else
2537 synchronize_irq(netdev->irq);
2538 be_eq_clean(eqo);
63fcb27f
PR
2539 }
2540
889cd4b2
SP
2541 be_irq_unregister(adapter);
2542
482c9e79
SP
2543 return 0;
2544}
2545
10ef9ab4 2546static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2547{
2548 struct be_rx_obj *rxo;
e9008ee9
PR
2549 int rc, i, j;
2550 u8 rsstable[128];
482c9e79
SP
2551
2552 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2553 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2554 sizeof(struct be_eth_rx_d));
2555 if (rc)
2556 return rc;
2557 }
2558
2559 /* The FW would like the default RXQ to be created first */
2560 rxo = default_rxo(adapter);
2561 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2562 adapter->if_handle, false, &rxo->rss_id);
2563 if (rc)
2564 return rc;
2565
2566 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2567 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2568 rx_frag_size, adapter->if_handle,
2569 true, &rxo->rss_id);
482c9e79
SP
2570 if (rc)
2571 return rc;
2572 }
2573
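/* The loop below fills the 128-entry RSS indirection table by cycling
 * through the RSS rings. Example: with 4 RSS rings whose ids are r0..r3,
 * the table becomes r0 r1 r2 r3 r0 r1 ... repeated 32 times, spreading
 * hashed flows evenly across the rings.
 */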
2574 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2575 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2576 for_all_rss_queues(adapter, rxo, i) {
2577 if ((j + i) >= 128)
2578 break;
2579 rsstable[j + i] = rxo->rss_id;
2580 }
2581 }
2582 rc = be_cmd_rss_config(adapter, rsstable, 128);
482c9e79
SP
2583 if (rc)
2584 return rc;
2585 }
2586
2587 /* First time posting */
10ef9ab4 2588 for_all_rx_queues(adapter, rxo, i)
482c9e79 2589 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2590 return 0;
2591}
2592
6b7c5b94
SP
2593static int be_open(struct net_device *netdev)
2594{
2595 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2596 struct be_eq_obj *eqo;
3abcdeda 2597 struct be_rx_obj *rxo;
10ef9ab4 2598 struct be_tx_obj *txo;
b236916a 2599 u8 link_status;
3abcdeda 2600 int status, i;
5fb379ee 2601
10ef9ab4 2602 status = be_rx_qs_create(adapter);
482c9e79
SP
2603 if (status)
2604 goto err;
2605
5fb379ee
SP
2606 be_irq_register(adapter);
2607
fe6d2a38
SP
2608 if (!lancer_chip(adapter))
2609 be_intr_set(adapter, true);
5fb379ee 2610
10ef9ab4 2611 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2612 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2613
10ef9ab4
SP
2614 for_all_tx_queues(adapter, txo, i)
2615 be_cq_notify(adapter, txo->cq.id, true, 0);
2616
7a1e9b20
SP
2617 be_async_mcc_enable(adapter);
2618
10ef9ab4
SP
2619 for_all_evt_queues(adapter, eqo, i) {
2620 napi_enable(&eqo->napi);
2621 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2622 }
2623
323ff71e 2624 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2625 if (!status)
2626 be_link_status_update(adapter, link_status);
2627
045508a8 2628 be_roce_dev_open(adapter);
889cd4b2
SP
2629 return 0;
2630err:
2631 be_close(adapter->netdev);
2632 return -EIO;
5fb379ee
SP
2633}
2634
71d8d1b5
AK
2635static int be_setup_wol(struct be_adapter *adapter, bool enable)
2636{
2637 struct be_dma_mem cmd;
2638 int status = 0;
2639 u8 mac[ETH_ALEN];
2640
2641 memset(mac, 0, ETH_ALEN);
2642
2643 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf
IV
2644 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2645 GFP_KERNEL);
71d8d1b5
AK
2646 if (cmd.va == NULL)
2647 return -1;
2648 memset(cmd.va, 0, cmd.size);
2649
2650 if (enable) {
2651 status = pci_write_config_dword(adapter->pdev,
2652 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2653 if (status) {
2654 dev_err(&adapter->pdev->dev,
2381a55c 2655 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2656 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2657 cmd.dma);
71d8d1b5
AK
2658 return status;
2659 }
2660 status = be_cmd_enable_magic_wol(adapter,
2661 adapter->netdev->dev_addr, &cmd);
2662 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2663 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2664 } else {
2665 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2666 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2667 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2668 }
2669
2b7bcebf 2670 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2671 return status;
2672}
2673
6d87f5c3
AK
2674/*
2675 * Generate a seed MAC address from the PF MAC Address using jhash.
2676 * MAC addresses for VFs are assigned incrementally starting from the seed.
2677 * These addresses are programmed in the ASIC by the PF and the VF driver
2678 * queries for the MAC address during its probe.
2679 */
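/* Example of the scheme: if the generated seed is 02:xx:xx:xx:xx:10 then
 * VF0 is programmed with ...:10, VF1 with ...:11, and so on; mac[5] is
 * simply incremented once per VF at the bottom of the loop below.
 */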
4c876616 2680static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2681{
f9449ab7 2682 u32 vf;
3abcdeda 2683 int status = 0;
6d87f5c3 2684 u8 mac[ETH_ALEN];
11ac75ed 2685 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2686
2687 be_vf_eth_addr_generate(adapter, mac);
2688
11ac75ed 2689 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2690 if (lancer_chip(adapter)) {
2691 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2692 } else {
2693 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2694 vf_cfg->if_handle,
2695 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2696 }
2697
6d87f5c3
AK
2698 if (status)
2699 dev_err(&adapter->pdev->dev,
590c391d 2700 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2701 else
11ac75ed 2702 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2703
2704 mac[5] += 1;
2705 }
2706 return status;
2707}
2708
4c876616
SP
2709static int be_vfs_mac_query(struct be_adapter *adapter)
2710{
2711 int status, vf;
2712 u8 mac[ETH_ALEN];
2713 struct be_vf_cfg *vf_cfg;
2714 bool active;
2715
2716 for_all_vfs(adapter, vf_cfg, vf) {
2717 be_cmd_get_mac_from_list(adapter, mac, &active,
2718 &vf_cfg->pmac_id, 0);
2719
2720 status = be_cmd_mac_addr_query(adapter, mac, false,
2721 vf_cfg->if_handle, 0);
2722 if (status)
2723 return status;
2724 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2725 }
2726 return 0;
2727}
2728
f9449ab7 2729static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2730{
11ac75ed 2731 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2732 u32 vf;
2733
39f1d94d 2734 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2735 dev_warn(&adapter->pdev->dev,
2736 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2737 goto done;
2738 }
2739
11ac75ed 2740 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2741 if (lancer_chip(adapter))
2742 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2743 else
11ac75ed
SP
2744 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2745 vf_cfg->pmac_id, vf + 1);
f9449ab7 2746
11ac75ed
SP
2747 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2748 }
39f1d94d
SP
2749 pci_disable_sriov(adapter->pdev);
2750done:
2751 kfree(adapter->vf_cfg);
2752 adapter->num_vfs = 0;
6d87f5c3
AK
2753}
2754
a54769f5
SP
2755static int be_clear(struct be_adapter *adapter)
2756{
fbc13f01
AK
2757 int i = 1;
2758
191eb756
SP
2759 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2760 cancel_delayed_work_sync(&adapter->work);
2761 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2762 }
2763
11ac75ed 2764 if (sriov_enabled(adapter))
f9449ab7
SP
2765 be_vf_clear(adapter);
2766
fbc13f01
AK
2767 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2768 be_cmd_pmac_del(adapter, adapter->if_handle,
2769 adapter->pmac_id[i], 0);
2770
f9449ab7 2771 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2772
2773 be_mcc_queues_destroy(adapter);
10ef9ab4 2774 be_rx_cqs_destroy(adapter);
a54769f5 2775 be_tx_queues_destroy(adapter);
10ef9ab4 2776 be_evt_queues_destroy(adapter);
a54769f5 2777
abb93951
PR
2778 kfree(adapter->pmac_id);
2779 adapter->pmac_id = NULL;
2780
10ef9ab4 2781 be_msix_disable(adapter);
a54769f5
SP
2782 return 0;
2783}
2784
4c876616 2785static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2786{
4c876616
SP
2787 struct be_vf_cfg *vf_cfg;
2788 u32 cap_flags, en_flags, vf;
abb93951
PR
2789 int status;
2790
4c876616
SP
2791 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2792 BE_IF_FLAGS_MULTICAST;
abb93951 2793
4c876616
SP
2794 for_all_vfs(adapter, vf_cfg, vf) {
2795 if (!BE3_chip(adapter))
2796 be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
2797
2798 /* If a FW profile exists, then cap_flags are updated */
2799 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2800 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2801 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2802 &vf_cfg->if_handle, vf + 1);
2803 if (status)
2804 goto err;
2805 }
2806err:
2807 return status;
abb93951
PR
2808}
2809
39f1d94d 2810static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2811{
11ac75ed 2812 struct be_vf_cfg *vf_cfg;
30128031
SP
2813 int vf;
2814
39f1d94d
SP
2815 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2816 GFP_KERNEL);
2817 if (!adapter->vf_cfg)
2818 return -ENOMEM;
2819
11ac75ed
SP
2820 for_all_vfs(adapter, vf_cfg, vf) {
2821 vf_cfg->if_handle = -1;
2822 vf_cfg->pmac_id = -1;
30128031 2823 }
39f1d94d 2824 return 0;
30128031
SP
2825}
2826
f9449ab7
SP
2827static int be_vf_setup(struct be_adapter *adapter)
2828{
11ac75ed 2829 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2830 u16 def_vlan, lnk_speed;
4c876616
SP
2831 int status, old_vfs, vf;
2832 struct device *dev = &adapter->pdev->dev;
39f1d94d 2833
4c876616
SP
2834 old_vfs = be_find_vfs(adapter, ENABLED);
2835 if (old_vfs) {
2836 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2837 if (old_vfs != num_vfs)
2838 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2839 adapter->num_vfs = old_vfs;
39f1d94d 2840 } else {
4c876616
SP
2841 if (num_vfs > adapter->dev_num_vfs)
2842 dev_info(dev, "Device supports %d VFs and not %d\n",
2843 adapter->dev_num_vfs, num_vfs);
2844 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2845
2846 status = pci_enable_sriov(adapter->pdev, num_vfs);
2847 if (status) {
2848 dev_err(dev, "SRIOV enable failed\n");
2849 adapter->num_vfs = 0;
2850 return 0;
2851 }
39f1d94d
SP
2852 }
2853
2854 status = be_vf_setup_init(adapter);
2855 if (status)
2856 goto err;
30128031 2857
4c876616
SP
2858 if (old_vfs) {
2859 for_all_vfs(adapter, vf_cfg, vf) {
2860 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2861 if (status)
2862 goto err;
2863 }
2864 } else {
2865 status = be_vfs_if_create(adapter);
f9449ab7
SP
2866 if (status)
2867 goto err;
f9449ab7
SP
2868 }
2869
4c876616
SP
2870 if (old_vfs) {
2871 status = be_vfs_mac_query(adapter);
2872 if (status)
2873 goto err;
2874 } else {
39f1d94d
SP
2875 status = be_vf_eth_addr_config(adapter);
2876 if (status)
2877 goto err;
2878 }
f9449ab7 2879
11ac75ed 2880 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2881 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2882 * Allow full available bandwidth
2883 */
2884 if (BE3_chip(adapter) && !old_vfs)
2885 be_cmd_set_qos(adapter, 1000, vf+1);
2886
2887 status = be_cmd_link_status_query(adapter, &lnk_speed,
2888 NULL, vf + 1);
2889 if (!status)
2890 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2891
2892 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2893 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2894 if (status)
2895 goto err;
2896 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2897
2898 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7
SP
2899 }
2900 return 0;
2901err:
4c876616
SP
2902 dev_err(dev, "VF setup failed\n");
2903 be_vf_clear(adapter);
f9449ab7
SP
2904 return status;
2905}
2906
30128031
SP
2907static void be_setup_init(struct be_adapter *adapter)
2908{
2909 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2910 adapter->phy.link_speed = -1;
30128031
SP
2911 adapter->if_handle = -1;
2912 adapter->be3_native = false;
2913 adapter->promiscuous = false;
f25b119c
PR
2914 if (be_physfn(adapter))
2915 adapter->cmd_privileges = MAX_PRIVILEGES;
2916 else
2917 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2918}
2919
1578e777
PR
2920static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2921 bool *active_mac, u32 *pmac_id)
590c391d 2922{
1578e777 2923 int status = 0;
e5e1ee89 2924
1578e777
PR
2925 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2926 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2927 if (!lancer_chip(adapter) && !be_physfn(adapter))
2928 *active_mac = true;
2929 else
2930 *active_mac = false;
e5e1ee89 2931
1578e777
PR
2932 return status;
2933 }
e5e1ee89 2934
1578e777
PR
2935 if (lancer_chip(adapter)) {
2936 status = be_cmd_get_mac_from_list(adapter, mac,
2937 active_mac, pmac_id, 0);
2938 if (*active_mac) {
5ee4979b
SP
2939 status = be_cmd_mac_addr_query(adapter, mac, false,
2940 if_handle, *pmac_id);
1578e777
PR
2941 }
2942 } else if (be_physfn(adapter)) {
2943 /* For BE3, for PF get permanent MAC */
5ee4979b 2944 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2945 *active_mac = false;
e5e1ee89 2946 } else {
1578e777 2947 /* For BE3, for VF get soft MAC assigned by PF */
5ee4979b 2948 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
2949 if_handle, 0);
2950 *active_mac = true;
e5e1ee89 2951 }
590c391d
PR
2952 return status;
2953}
2954
abb93951
PR
2955static void be_get_resources(struct be_adapter *adapter)
2956{
4c876616
SP
2957 u16 dev_num_vfs;
2958 int pos, status;
abb93951
PR
2959 bool profile_present = false;
2960
4c876616 2961 if (!BEx_chip(adapter)) {
abb93951 2962 status = be_cmd_get_func_config(adapter);
abb93951
PR
2963 if (!status)
2964 profile_present = true;
2965 }
2966
2967 if (profile_present) {
2968 /* Sanity fixes for Lancer */
2969 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2970 BE_UC_PMAC_COUNT);
2971 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2972 BE_NUM_VLANS_SUPPORTED);
2973 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2974 BE_MAX_MC);
2975 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2976 MAX_TX_QS);
2977 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2978 BE3_MAX_RSS_QS);
2979 adapter->max_event_queues = min_t(u16,
2980 adapter->max_event_queues,
2981 BE3_MAX_RSS_QS);
2982
2983 if (adapter->max_rss_queues &&
2984 adapter->max_rss_queues == adapter->max_rx_queues)
2985 adapter->max_rss_queues -= 1;
2986
2987 if (adapter->max_event_queues < adapter->max_rss_queues)
2988 adapter->max_rss_queues = adapter->max_event_queues;
2989
2990 } else {
2991 if (be_physfn(adapter))
2992 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2993 else
2994 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2995
2996 if (adapter->function_mode & FLEX10_MODE)
2997 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2998 else
2999 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3000
3001 adapter->max_mcast_mac = BE_MAX_MC;
3002 adapter->max_tx_queues = MAX_TX_QS;
3003 adapter->max_rss_queues = (adapter->be3_native) ?
3004 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3005 adapter->max_event_queues = BE3_MAX_RSS_QS;
3006
3007 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3008 BE_IF_FLAGS_BROADCAST |
3009 BE_IF_FLAGS_MULTICAST |
3010 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3011 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3012 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3013 BE_IF_FLAGS_PROMISCUOUS;
3014
3015 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3016 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3017 }
4c876616
SP
3018
3019 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3020 if (pos) {
3021 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3022 &dev_num_vfs);
3023 if (BE3_chip(adapter))
3024 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3025 adapter->dev_num_vfs = dev_num_vfs;
3026 }
abb93951
PR
3027}
3028
39f1d94d
SP
3029/* Routine to query per function resource limits */
3030static int be_get_config(struct be_adapter *adapter)
3031{
4c876616 3032 int status;
39f1d94d 3033
abb93951
PR
3034 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3035 &adapter->function_mode,
3036 &adapter->function_caps);
3037 if (status)
3038 goto err;
3039
3040 be_get_resources(adapter);
3041
3042 /* primary mac needs 1 pmac entry */
3043 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3044 sizeof(u32), GFP_KERNEL);
3045 if (!adapter->pmac_id) {
3046 status = -ENOMEM;
3047 goto err;
3048 }
3049
abb93951
PR
3050err:
3051 return status;
39f1d94d
SP
3052}
3053
5fb379ee
SP
3054static int be_setup(struct be_adapter *adapter)
3055{
39f1d94d 3056 struct device *dev = &adapter->pdev->dev;
abb93951 3057 u32 en_flags;
a54769f5 3058 u32 tx_fc, rx_fc;
10ef9ab4 3059 int status;
ba343c77 3060 u8 mac[ETH_ALEN];
1578e777 3061 bool active_mac;
ba343c77 3062
30128031 3063 be_setup_init(adapter);
6b7c5b94 3064
abb93951
PR
3065 if (!lancer_chip(adapter))
3066 be_cmd_req_native_mode(adapter);
39f1d94d 3067
abb93951
PR
3068 status = be_get_config(adapter);
3069 if (status)
3070 goto err;
73d540f2 3071
10ef9ab4
SP
3072 be_msix_enable(adapter);
3073
3074 status = be_evt_queues_create(adapter);
3075 if (status)
a54769f5 3076 goto err;
6b7c5b94 3077
10ef9ab4
SP
3078 status = be_tx_cqs_create(adapter);
3079 if (status)
3080 goto err;
3081
3082 status = be_rx_cqs_create(adapter);
3083 if (status)
a54769f5 3084 goto err;
6b7c5b94 3085
f9449ab7 3086 status = be_mcc_queues_create(adapter);
10ef9ab4 3087 if (status)
a54769f5 3088 goto err;
6b7c5b94 3089
f25b119c
PR
3090 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3091 /* In UMC mode FW does not return right privileges.
3092 * Override with correct privilege equivalent to PF.
3093 */
3094 if (be_is_mc(adapter))
3095 adapter->cmd_privileges = MAX_PRIVILEGES;
3096
f9449ab7
SP
3097 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3098 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3099
abb93951 3100 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3101 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3102
abb93951 3103 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3104
abb93951 3105 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3106 &adapter->if_handle, 0);
5fb379ee 3107 if (status != 0)
a54769f5 3108 goto err;
6b7c5b94 3109
1578e777
PR
3110 memset(mac, 0, ETH_ALEN);
3111 active_mac = false;
3112 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3113 &active_mac, &adapter->pmac_id[0]);
3114 if (status != 0)
3115 goto err;
3116
3117 if (!active_mac) {
3118 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3119 &adapter->pmac_id[0], 0);
3120 if (status != 0)
3121 goto err;
3122 }
3123
3124 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3125 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3126 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3127 }
0dffc83e 3128
10ef9ab4
SP
3129 status = be_tx_qs_create(adapter);
3130 if (status)
3131 goto err;
3132
04b71175 3133 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3134
1d1e9a46 3135 if (adapter->vlans_added)
10329df8 3136 be_vid_config(adapter);
7ab8b0b4 3137
a54769f5 3138 be_set_rx_mode(adapter->netdev);
5fb379ee 3139
ddc3f5cb 3140 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3141
ddc3f5cb
AK
3142 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3143 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3144 adapter->rx_fc);
2dc1deb6 3145
39f1d94d
SP
3146 if (be_physfn(adapter) && num_vfs) {
3147 if (adapter->dev_num_vfs)
3148 be_vf_setup(adapter);
3149 else
3150 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3151 }
3152
f25b119c
PR
3153 status = be_cmd_get_phy_info(adapter);
3154 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3155 adapter->phy.fc_autoneg = 1;
3156
191eb756
SP
3157 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3158 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3159 return 0;
a54769f5
SP
3160err:
3161 be_clear(adapter);
3162 return status;
3163}
6b7c5b94 3164
66268739
IV
3165#ifdef CONFIG_NET_POLL_CONTROLLER
3166static void be_netpoll(struct net_device *netdev)
3167{
3168 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3169 struct be_eq_obj *eqo;
66268739
IV
3170 int i;
3171
e49cc34f
SP
3172 for_all_evt_queues(adapter, eqo, i) {
3173 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3174 napi_schedule(&eqo->napi);
3175 }
10ef9ab4
SP
3176
3177 return;
66268739
IV
3178}
3179#endif
3180
84517482 3181#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3182char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3183
fa9a6fed 3184static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3185 const u8 *p, u32 img_start, int image_size,
3186 int hdr_size)
fa9a6fed
SB
3187{
3188 u32 crc_offset;
3189 u8 flashed_crc[4];
3190 int status;
3f0d4560
AK
3191
3192 crc_offset = hdr_size + img_start + image_size - 4;
3193
fa9a6fed 3194 p += crc_offset;
3f0d4560
AK
3195
3196 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3197 (image_size - 4));
fa9a6fed
SB
3198 if (status) {
3199 dev_err(&adapter->pdev->dev,
3200 "could not get crc from flash, not flashing redboot\n");
3201 return false;
3202 }
3203
3204 /* update redboot only if crc does not match */
3205 if (!memcmp(flashed_crc, p, 4))
3206 return false;
3207 else
3208 return true;
fa9a6fed
SB
3209}
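/* Note: the CRC compared above occupies the last 4 bytes of the image
 * region (hence img_start + image_size - 4); redboot is reflashed only
 * when the CRC in the new UFI differs from the one already in flash,
 * presumably to avoid rewriting identical boot code.
 */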
3210
306f1348
SP
3211static bool phy_flashing_required(struct be_adapter *adapter)
3212{
42f11cf2
AK
3213 return (adapter->phy.phy_type == TN_8022 &&
3214 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3215}
3216
c165541e
PR
3217static bool is_comp_in_ufi(struct be_adapter *adapter,
3218 struct flash_section_info *fsec, int type)
3219{
3220 int i = 0, img_type = 0;
3221 struct flash_section_info_g2 *fsec_g2 = NULL;
3222
ca34fe38 3223 if (BE2_chip(adapter))
c165541e
PR
3224 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3225
3226 for (i = 0; i < MAX_FLASH_COMP; i++) {
3227 if (fsec_g2)
3228 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3229 else
3230 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3231
3232 if (img_type == type)
3233 return true;
3234 }
3235 return false;
3236
3237}
3238
3239struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3240 int header_size,
3241 const struct firmware *fw)
3242{
3243 struct flash_section_info *fsec = NULL;
3244 const u8 *p = fw->data;
3245
3246 p += header_size;
3247 while (p < (fw->data + fw->size)) {
3248 fsec = (struct flash_section_info *)p;
3249 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3250 return fsec;
3251 p += 32;
3252 }
3253 return NULL;
3254}
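/* Note: flash_cookie is declared as two 16-byte strings, so the memcmp()
 * above compares all 32 bytes including NUL padding; this implies the
 * on-flash cookie field uses the same padded two-field layout. The scan
 * walks the file in 32-byte steps looking for that signature.
 */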
3255
773a2d7c
PR
3256static int be_flash(struct be_adapter *adapter, const u8 *img,
3257 struct be_dma_mem *flash_cmd, int optype, int img_size)
3258{
3259 u32 total_bytes = 0, flash_op, num_bytes = 0;
3260 int status = 0;
3261 struct be_cmd_write_flashrom *req = flash_cmd->va;
3262
3263 total_bytes = img_size;
3264 while (total_bytes) {
3265 num_bytes = min_t(u32, 32*1024, total_bytes);
3266
3267 total_bytes -= num_bytes;
3268
3269 if (!total_bytes) {
3270 if (optype == OPTYPE_PHY_FW)
3271 flash_op = FLASHROM_OPER_PHY_FLASH;
3272 else
3273 flash_op = FLASHROM_OPER_FLASH;
3274 } else {
3275 if (optype == OPTYPE_PHY_FW)
3276 flash_op = FLASHROM_OPER_PHY_SAVE;
3277 else
3278 flash_op = FLASHROM_OPER_SAVE;
3279 }
3280
be716446 3281 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3282 img += num_bytes;
3283 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3284 flash_op, num_bytes);
3285 if (status) {
3286 if (status == ILLEGAL_IOCTL_REQ &&
3287 optype == OPTYPE_PHY_FW)
3288 break;
3289 dev_err(&adapter->pdev->dev,
3290 "cmd to write to flash rom failed.\n");
3291 return status;
3292 }
3293 }
3294 return 0;
3295}
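/* Sketch of the chunking protocol implemented above: the image is sent in
 * 32KB pieces, each intermediate piece with a *_SAVE op (staged on the
 * card) and the final piece with a *_FLASH op that commits the staged
 * data. A PHY-firmware write rejected with ILLEGAL_IOCTL_REQ is treated
 * as "PHY flash not needed" rather than as an error.
 */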
3296
/* For BE2 and BE3 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

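/* Skyhawk UFIs are self-describing: instead of a compiled-in component
 * table, the flash section info embedded in the image enumerates each
 * section's offset, size and type; the type is mapped to a flash-rom
 * operation type below before the section is handed to be_flash().
 */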
static int be_flash_skyhawk(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

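/* Lancer firmware activation: after a download the chip may need a SLIPORT
 * reset. lancer_wait_idle() polls the physdev control register for up to
 * SLIPORT_IDLE_TIMEOUT seconds until no operation is in progress, and
 * lancer_fw_reset() then triggers the actual FW reset.
 */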
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

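/* Lancer downloads firmware through generic write-object commands rather
 * than flash-rom commands: the image is streamed to the "/prg" object in
 * 32KB chunks, then committed with a zero-length write. The returned
 * change_status indicates whether the new image needs a FW reset or a
 * system reboot to become active.
 */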
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

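/* A UFI image is matched to the adapter family by the first digit of the
 * build string in its header: '2'/'3'/'4' correspond to BE2/BE3/Skyhawk.
 * Flashing a UFI built for a different generation is refused.
 */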
#define UFI_TYPE2		2
#define UFI_TYPE3		3
#define UFI_TYPE4		4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3')
		return UFI_TYPE3;
	else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

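/* Top-level flash routine for BE2/BE3/Skyhawk UFIs: allocate one DMA'able
 * flash command buffer, determine the UFI type, and invoke the
 * generation-specific flashing routine for each image header whose
 * imageid matches.
 */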
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == UFI_TYPE4)
				status = be_flash_skyhawk(adapter, fw,
							&flash_cmd, num_imgs);
			else if (ufi_type == UFI_TYPE3)
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

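/* Entry point for user-initiated flashing. On this driver the call is
 * typically reached via the ethtool flash op (e.g. "ethtool -f eth0
 * <fw-file>"); the firmware file itself is resolved through
 * request_firmware() below. Flashing is refused while the interface is
 * down, as FW commands may not be serviced in that state.
 */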
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

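/* net_device_ops wiring; the ndo_set_vf_* / ndo_get_vf_config ops are
 * serviced by the PF on behalf of its SR-IOV VFs.
 */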
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

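/* One-time netdev setup: advertise offloads (checksum, TSO, VLAN, plus RX
 * hashing on multi-RX-queue configs), install the netdev and ethtool ops,
 * and register one NAPI context per event queue.
 */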
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

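/* BAR layout differs by chip: Lancer and VFs expose the doorbells in
 * BAR 0, while BE2/BE3/Skyhawk PFs use BAR 4; BEx PFs additionally map
 * the CSR space from BAR 2. Skyhawk reserves a 4K doorbell region for
 * the RoCE driver.
 */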
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

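/* Set up the controller plumbing: map the PCI BARs, carve a 16-byte-aligned
 * mailbox out of a slightly over-sized DMA allocation (hence the "+ 16"),
 * allocate the RX filter command buffer, and initialize the mailbox/MCC
 * locks.
 */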
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);
	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
	       !be_is_wol_excluded(adapter);
}

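/* Query the FW's extended FAT capabilities and pull out the UART trace
 * level; this seeds msg_enable in be_get_initial_config() below.
 */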
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get wol capabilities
		 * check the exclusion list to determine WOL capability
		 */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

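/* Lancer error recovery: once be_detect_error() flags a hw_error, the
 * recovery worker tears the function down (be_close()/be_clear()) and
 * rebuilds it with be_setup()/be_open() after the FW reports ready again.
 * EEH-detected errors are left to the PCI error handlers further below.
 */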
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

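/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * off, refresh stats and die temperature, replenish starved RX queues and
 * adapt EQ interrupt delays.
 */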
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

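/* Probe sequence: enable the PCI function, allocate the netdev, set the
 * DMA mask, map BARs and the mailbox (be_ctrl_init), sync with the FW's
 * ready state, optionally reset the function, then be_setup() and
 * register the netdev. Error paths unwind in strict reverse order via
 * the labels at the bottom.
 */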
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

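/* PCI/EEH error handling: on a detected channel error the function is
 * quiesced (be_eeh_err_detected), the slot is then reset and FW readiness
 * re-checked (be_eeh_reset), and finally the function is rebuilt and
 * re-attached (be_eeh_resume).
 */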
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);