]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
net: get rid of SET_ETHTOOL_OPS
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
6b7c5b94 42static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
7c185276 54/* UE Status Low CSR */
42c8b11e 55static const char * const ue_status_low_desc[] = {
7c185276
AK
56 "CEV",
57 "CTX",
58 "DBUF",
59 "ERX",
60 "Host",
61 "MPU",
62 "NDMA",
63 "PTC ",
64 "RDMA ",
65 "RXF ",
66 "RXIPS ",
67 "RXULP0 ",
68 "RXULP1 ",
69 "RXULP2 ",
70 "TIM ",
71 "TPOST ",
72 "TPRE ",
73 "TXIPS ",
74 "TXULP0 ",
75 "TXULP1 ",
76 "UC ",
77 "WDMA ",
78 "TXULP2 ",
79 "HOST1 ",
80 "P0_OB_LINK ",
81 "P1_OB_LINK ",
82 "HOST_GPIO ",
83 "MBOX ",
84 "AXGMAC0",
85 "AXGMAC1",
86 "JTAG",
87 "MPU_INTPEND"
88};
89/* UE Status High CSR */
42c8b11e 90static const char * const ue_status_hi_desc[] = {
7c185276
AK
91 "LPCMEMHOST",
92 "MGMT_MAC",
93 "PCS0ONLINE",
94 "MPU_IRAM",
95 "PCS1ONLINE",
96 "PCTL0",
97 "PCTL1",
98 "PMEM",
99 "RR",
100 "TXPB",
101 "RXPP",
102 "XAUI",
103 "TXP",
104 "ARM",
105 "IPC",
106 "HOST2",
107 "HOST3",
108 "HOST4",
109 "HOST5",
110 "HOST6",
111 "HOST7",
112 "HOST8",
113 "HOST9",
42c8b11e 114 "NETC",
7c185276
AK
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown",
120 "Unknown",
121 "Unknown",
122 "Unknown"
123};
6b7c5b94 124
752961a1 125
6b7c5b94
SP
/* Free the DMA-coherent ring memory backing @q, if it was allocated.
 * Safe to call on a queue that was never allocated (mem->va == NULL).
 */
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		/* mark as freed so a second call is a no-op */
		mem->va = NULL;
	}
}
135
/* Allocate a zeroed DMA-coherent ring of @len entries of @entry_size bytes
 * for @q.  Returns 0 on success or -ENOMEM if the coherent allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	/* dma_zalloc_coherent() returns zero-filled memory */
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
151
/* Enable/disable host interrupts via the HOSTINTR bit in PCI config space.
 * Reads the current state first and writes back only when the requested
 * state actually differs, avoiding a redundant config-space write.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
170
68c45a2d
SK
/* Enable/disable adapter interrupts.  Prefers the FW INTR_SET command;
 * falls back to the PCI config-space register if the command fails.
 * No-op on Lancer chips and while an EEH error is pending.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
186
/* Ring the RX queue doorbell: tell HW that @posted new buffers were added
 * to RX ring @qid.  The wmb() orders the descriptor writes before the
 * doorbell write so HW never sees the doorbell ahead of the descriptors.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
196
94d73aaa
VV
/* Ring the TX queue doorbell: tell HW that @posted WRBs were added to
 * @txo's ring.  wmb() orders WRB writes before the doorbell write.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	/* per-queue doorbell offset (txo->db_offset), not a fixed one */
	iowrite32(val, adapter->db + txo->db_offset);
}
207
/* Notify the event queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed events and optionally re-arm the EQ (@arm) and/or clear the
 * interrupt (@clear_int).  Skipped entirely while an EEH error is pending.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
226
/* Notify the completion queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completions and optionally re-arm the CQ (@arm).
 * Skipped while an EEH error is pending.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
242
6b7c5b94
SP
/* ndo_set_mac_address handler: program a new MAC via FW PMAC commands.
 * The new MAC is considered active only after re-querying the FW, since
 * PMAC_ADD may legitimately fail on VFs (the PF may have programmed the
 * MAC on the VF's behalf).  Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
/* Copy the v0 (BE2) h/w stats response into the driver's stats struct,
 * after converting the whole response from LE to CPU byte order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps per-port jabber counters at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
388
/* Copy the v1 (BE3) h/w stats response into the driver's stats struct,
 * after converting the whole response from LE to CPU byte order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* unlike v0, jabber events are already per-port here */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
434
61000861
AK
/* Copy the v2 h/w stats response into the driver's stats struct, after
 * converting from LE to CPU byte order.  v2 additionally carries RoCE
 * counters, copied only when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
488
005d5696
SX
/* Copy the Lancer pport stats response into the driver's stats struct,
 * after converting from LE to CPU byte order.  Lancer uses a different
 * (pport) stats layout from the BEx v0/v1/v2 formats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold address- and vlan-filtered drops into one counter */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
89a88ab8 526
09c1c68f
SP
527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
/* Record the per-RXQ "drops due to no fragments" ERX counter.  On BEx the
 * HW counter is only 16 bits wide, so it is folded into a 32-bit software
 * accumulator; later chips report a full value that is stored directly.
 */
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}
551
89a88ab8
AK
/* Parse the raw GET_STATS response into adapter->drv_stats, dispatching
 * on chip family, then fold in the per-RX-queue ERX drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
577
/* ndo_get_stats64 handler: aggregate per-RX/TX-queue packet and byte
 * counters (read consistently via the u64_stats seqcount retry loops)
 * plus the error counters maintained in adapter->drv_stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry until a consistent snapshot of pkts/bytes is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
643
/* Propagate a FW-reported link state to the netdev carrier.  The first
 * call also forces carrier-off once so the stack starts from a known
 * state (tracked via BE_FLAGS_LINK_STATUS_INIT).
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}
658
/* Update per-TX-queue software counters for one transmit request, inside
 * a u64_stats update section so 64-bit readers see consistent values.
 * A non-GSO skb counts as a single packet (gso_segs == 0).
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
674
/* Determine number of WRB entries needed to xmit data in an skb:
 * one per fragment, one for linear data (if any), one for the header,
 * plus (on non-Lancer chips) a dummy WRB to make the total even.
 * *dummy reports whether that padding WRB is required.
 */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	/* 1 if the skb has linear data, 0 if it is all fragments */
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
695
/* Fill one TX WRB with a DMA address split into hi/lo 32-bit halves and a
 * length masked to the field width; rsvd0 is cleared explicitly.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}
703
/* Return the VLAN tag to put in the TX header.  If the priority in the
 * skb's tag is not in the adapter's allowed-priority bitmap, rewrite the
 * priority bits with the adapter's recommended priority.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}
719
c9c47142
SP
/* Used only for IP tunnel packets: return the L4 protocol of the inner
 * IP header (IPv4 protocol field or IPv6 nexthdr).
 */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}
726
/* Return the L4 protocol of the skb's outer IP header (IPv4 protocol
 * field or IPv6 nexthdr).
 */
static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
732
/* Build the TX header WRB: LSO/checksum-offload flags, VLAN tag, the
 * total WRB count and payload length.  For encapsulated packets the inner
 * L4 protocol selects the checksum flags and ipcs is set for the inner IP.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
774
/* DMA-unmap the buffer described by one TX WRB.  The WRB is first
 * converted from LE to CPU order so the address/length fields are usable;
 * @unmap_single selects dma_unmap_single() (linear data) vs
 * dma_unmap_page() (fragment).  Zero-length WRBs (dummies) are skipped.
 */
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
6b7c5b94 791
/* DMA-map an skb and post its WRBs (header, linear data, fragments, and
 * an optional dummy pad WRB) onto @txq.  Returns the number of payload
 * bytes mapped, or 0 on a DMA mapping error — in which case all WRBs
 * queued so far are unmapped and the queue head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot; it is filled in last, once the
	 * total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every WRB posted so far, oldest first.  Only the
	 * first mapped WRB can be a dma_map_single() mapping.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Move the VLAN tag from skb metadata into the packet data itself and,
 * in QnQ mode, also insert the outer (qnq_vid) tag.  Used as a f/w
 * workaround; may set *skip_hw_vlan to tell the caller that HW VLAN
 * insertion must be skipped for this packet.
 * Returns the (possibly reallocated) skb, or NULL if tag insertion
 * failed (the skb is consumed by __vlan_put_tag in that case).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* we may modify packet data; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the packet; clear the metadata copy */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
bc0c3405
AK
901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
923static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924{
925 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926}
927
748b539a 928static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
bc0c3405 929{
ee9c799c 930 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
bc0c3405
AK
931}
932
/* Apply BEx/Lancer TX-path workarounds to the skb: trim HW-added pad on
 * short IPv4 frames, force manual VLAN insertion where HW tagging would
 * corrupt the packet or lock up the ASIC, and drop packets that cannot
 * be made safe.  Returns the (possibly reallocated) skb, or NULL if it
 * was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes so the frame length matches tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
ec495fac
VV
1001static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002 struct sk_buff *skb,
1003 bool *skip_hw_vlan)
1004{
1005 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006 * less may cause a transmit stall on that port. So the work-around is
1007 * to pad short packets (<= 32 bytes) to a 36-byte length.
1008 */
1009 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010 if (skb_padto(skb, 36))
1011 return NULL;
1012 skb->len = 36;
1013 }
1014
1015 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017 if (!skb)
1018 return NULL;
1019 }
1020
1021 return skb;
1022}
1023
/* ndo_start_xmit handler: apply TX workarounds, build the WRBs for the
 * skb, stop the subqueue if it is about to fill, and ring the TX
 * doorbell.  Always returns NETDEV_TX_OK; packets that cannot be sent
 * are dropped and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* for rewinding on WRB-build failure */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds consumed/dropped the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue and drop the skb */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
748b539a 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94 1078 dev_info(&adapter->pdev->dev,
748b539a
SP
1079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
748b539a 1085 netdev->mtu, new_mtu);
6b7c5b94
SP
1086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		/* filter programmed OK; leave VLAN-promisc mode if we
		 * were in it
		 */
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN-promisc mode; nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1146
80d5c368 1147static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1148{
1149 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1150 int status = 0;
6b7c5b94 1151
a85e9986
PR
1152 /* Packets with VID 0 are always received by Lancer by default */
1153 if (lancer_chip(adapter) && vid == 0)
48291c22
VV
1154 return status;
1155
f6cbd364 1156 if (test_bit(vid, adapter->vids))
48291c22 1157 return status;
a85e9986 1158
f6cbd364 1159 set_bit(vid, adapter->vids);
a6b74e01 1160 adapter->vlans_added++;
8e586137 1161
a6b74e01
SK
1162 status = be_vid_config(adapter);
1163 if (status) {
1164 adapter->vlans_added--;
f6cbd364 1165 clear_bit(vid, adapter->vids);
a6b74e01 1166 }
48291c22 1167
80817cbf 1168 return status;
6b7c5b94
SP
1169}
1170
80d5c368 1171static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
6b7c5b94
SP
1172{
1173 struct be_adapter *adapter = netdev_priv(netdev);
80817cbf 1174 int status = 0;
6b7c5b94 1175
a85e9986
PR
1176 /* Packets with VID 0 are always received by Lancer by default */
1177 if (lancer_chip(adapter) && vid == 0)
1178 goto ret;
1179
f6cbd364 1180 clear_bit(vid, adapter->vids);
a6b74e01 1181 status = be_vid_config(adapter);
80817cbf
AK
1182 if (!status)
1183 adapter->vlans_added--;
1184 else
f6cbd364 1185 set_bit(vid, adapter->vids);
80817cbf
AK
1186ret:
1187 return status;
6b7c5b94
SP
1188}
1189
7ad09458
S
1190static void be_clear_promisc(struct be_adapter *adapter)
1191{
1192 adapter->promiscuous = false;
1193 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1194
1195 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1196}
1197
/* ndo_set_rx_mode handler: program the HW RX filters to match the
 * netdev's promisc/allmulti flags and its unicast/multicast address
 * lists, falling back to promiscuous modes when HW filter slots are
 * exhausted.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-apply the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addresses for HW slots: go promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev,
			 "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev,
			 "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}
1259
ba343c77
SB
1260static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1261{
1262 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1263 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1264 int status;
1265
11ac75ed 1266 if (!sriov_enabled(adapter))
ba343c77
SB
1267 return -EPERM;
1268
11ac75ed 1269 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1270 return -EINVAL;
1271
3175d8c2
SP
1272 if (BEx_chip(adapter)) {
1273 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1274 vf + 1);
ba343c77 1275
11ac75ed
SP
1276 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1277 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1278 } else {
1279 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1280 vf + 1);
590c391d
PR
1281 }
1282
64600ea5 1283 if (status)
ba343c77 1284 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
748b539a 1285 mac, vf);
64600ea5 1286 else
11ac75ed 1287 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
64600ea5 1288
ba343c77
SB
1289 return status;
1290}
1291
64600ea5 1292static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1293 struct ifla_vf_info *vi)
64600ea5
AK
1294{
1295 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1296 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1297
11ac75ed 1298 if (!sriov_enabled(adapter))
64600ea5
AK
1299 return -EPERM;
1300
11ac75ed 1301 if (vf >= adapter->num_vfs)
64600ea5
AK
1302 return -EINVAL;
1303
1304 vi->vf = vf;
11ac75ed 1305 vi->tx_rate = vf_cfg->tx_rate;
a60b3a13
AK
1306 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1307 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1308 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1309 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
64600ea5
AK
1310
1311 return 0;
1312}
1313
748b539a 1314static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1315{
1316 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1317 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1da87b7f
AK
1318 int status = 0;
1319
11ac75ed 1320 if (!sriov_enabled(adapter))
1da87b7f
AK
1321 return -EPERM;
1322
b9fc0e53 1323 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1324 return -EINVAL;
1325
b9fc0e53
AK
1326 if (vlan || qos) {
1327 vlan |= qos << VLAN_PRIO_SHIFT;
c502224e 1328 if (vf_cfg->vlan_tag != vlan)
b9fc0e53
AK
1329 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1330 vf_cfg->if_handle, 0);
1da87b7f 1331 } else {
f1f3ee1b 1332 /* Reset Transparent Vlan Tagging. */
c502224e
SK
1333 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1334 vf + 1, vf_cfg->if_handle, 0);
1da87b7f
AK
1335 }
1336
c502224e
SK
1337 if (!status)
1338 vf_cfg->vlan_tag = vlan;
1339 else
1da87b7f 1340 dev_info(&adapter->pdev->dev,
c502224e 1341 "VLAN %d config on VF %d failed\n", vlan, vf);
1da87b7f
AK
1342 return status;
1343}
1344
748b539a 1345static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
e1d18735
AK
1346{
1347 struct be_adapter *adapter = netdev_priv(netdev);
1348 int status = 0;
1349
11ac75ed 1350 if (!sriov_enabled(adapter))
e1d18735
AK
1351 return -EPERM;
1352
94f434c2 1353 if (vf >= adapter->num_vfs)
e1d18735
AK
1354 return -EINVAL;
1355
94f434c2
AK
1356 if (rate < 100 || rate > 10000) {
1357 dev_err(&adapter->pdev->dev,
1358 "tx rate must be between 100 and 10000 Mbps\n");
1359 return -EINVAL;
1360 }
e1d18735 1361
a401801c 1362 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
e1d18735 1363 if (status)
94f434c2 1364 dev_err(&adapter->pdev->dev,
748b539a 1365 "tx rate %d on VF %d failed\n", rate, vf);
94f434c2
AK
1366 else
1367 adapter->vf_cfg[vf].tx_rate = rate;
e1d18735
AK
1368 return status;
1369}
bdce2ad7
SR
1370static int be_set_vf_link_state(struct net_device *netdev, int vf,
1371 int link_state)
1372{
1373 struct be_adapter *adapter = netdev_priv(netdev);
1374 int status;
1375
1376 if (!sriov_enabled(adapter))
1377 return -EPERM;
1378
1379 if (vf >= adapter->num_vfs)
1380 return -EINVAL;
1381
1382 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1383 if (!status)
1384 adapter->vf_cfg[vf].plink_tracking = link_state;
1385
1386 return status;
1387}
e1d18735 1388
2632bafd
SP
1389static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1390 ulong now)
6b7c5b94 1391{
2632bafd
SP
1392 aic->rx_pkts_prev = rx_pkts;
1393 aic->tx_reqs_prev = tx_pkts;
1394 aic->jiffies = now;
1395}
ac124ff9 1396
/* Adaptive interrupt coalescing: recompute the event-queue delay for
 * every EQ from the rx/tx packet rate observed since the previous run,
 * and push all changed delays to the f/w in a single command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC off: use the statically configured delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* read the 64-bit counters under their stats seqcounts */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* packets/sec since the last snapshot.
		 * NOTE(review): delta is 0 if this runs twice within one
		 * jiffy, which would divide by zero -- confirm the caller
		 * guarantees a non-zero interval between runs.
		 */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* clamp: very low rates get no delay at all */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* queue a f/w update only when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1463
3abcdeda 1464static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1465 struct be_rx_compl_info *rxcp)
4097f663 1466{
ac124ff9 1467 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1468
ab1594e9 1469 u64_stats_update_begin(&stats->sync);
3abcdeda 1470 stats->rx_compl++;
2e588f84 1471 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1472 stats->rx_pkts++;
2e588f84 1473 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1474 stats->rx_mcast_pkts++;
2e588f84 1475 if (rxcp->err)
ac124ff9 1476 stats->rx_compl_err++;
ab1594e9 1477 u64_stats_update_end(&stats->sync);
4097f663
SP
1478}
1479
2e588f84 1480static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1481{
19fad86f 1482 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1483 * Also ignore ipcksm for ipv6 pkts
1484 */
2e588f84 1485 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1486 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1487}
1488
/* Pop the page_info at the RX queue tail.  If this frag is the last one
 * sharing its page mapping, the whole page is DMA-unmapped; otherwise
 * only this frag's region is synced for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* final frag of this page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still mapped; just make this frag CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1514
1515/* Throwaway the data in the Rx completion */
10ef9ab4
SP
1516static void be_rx_compl_discard(struct be_rx_obj *rxo,
1517 struct be_rx_compl_info *rxcp)
6b7c5b94 1518{
6b7c5b94 1519 struct be_rx_page_info *page_info;
2e588f84 1520 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1521
e80d9da6 1522 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1523 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1524 put_page(page_info->page);
1525 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1526 }
1527}
1528
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  Tiny packets are copied entirely into the linear
 * area; larger ones get only the ethernet header copied, with the rest
 * attached as page fragments (frags sharing a page are coalesced).
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* copy only the ethernet header into the linear area;
		 * the remainder of the first frag stays as a page frag
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag; drop the extra
			 * page reference
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1603
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received frags, fill in checksum/hash/
 * vlan metadata and hand the packet to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no memory for an skb: count and drop the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1639
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received frags directly to the napi GRO skb (coalescing
 * frags that share a page), fill in metadata and feed the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* no GRO skb available: drop this completion's frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* note: j is u16 and starts at -1 (wraps); first iteration's j++
	 * brings it to 0
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag; drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->encapsulation = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1697
10ef9ab4
SP
1698static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1699 struct be_rx_compl_info *rxcp)
2e588f84
SP
1700{
1701 rxcp->pkt_size =
1702 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1703 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1704 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1705 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1706 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1707 rxcp->ip_csum =
1708 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1709 rxcp->l4_csum =
1710 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1711 rxcp->ipv6 =
1712 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1713 rxcp->num_rcvd =
1714 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1715 rxcp->pkt_type =
1716 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1717 rxcp->rss_hash =
c297977e 1718 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1719 if (rxcp->vlanf) {
f93f160b 1720 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f 1721 compl);
748b539a
SP
1722 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1723 vlan_tag, compl);
15d72184 1724 }
12004ae9 1725 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
c9c47142
SP
1726 rxcp->tunneled =
1727 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
2e588f84
SP
1728}
1729
10ef9ab4
SP
/* Decode a v0-format RX completion entry into the driver's generic
 * be_rx_compl_info. Used on non-BE3-native chips (see be_rx_compl_get()).
 * VLAN fields are only extracted when the completion flags the frame as
 * VLAN-tagged (vtp bit).
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1761
/* Fetch and parse the next RX completion from rxo's CQ.
 * Returns NULL when no valid completion is pending. On success the entry is
 * consumed: it is byte-swapped in place, its valid bit is cleared, and the
 * CQ tail is advanced. The returned be_rx_compl_info is rxo->rxcp and is
 * overwritten by the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: the valid-bit check above must complete before the
	 * rest of the DMA'ed completion entry is read below. */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not valid/complete for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag if it matches the port's pvid and the host
		 * has not explicitly configured that vlan */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1806
1829b086 1807static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1808{
6b7c5b94 1809 u32 order = get_order(size);
1829b086 1810
6b7c5b94 1811 if (order > 0)
1829b086
ED
1812 gfp |= __GFP_COMP;
1813 return alloc_pages(gfp, order);
6b7c5b94
SP
1814}
1815
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	/* Post until MAX_RX_POST frags are queued or we hit a slot whose
	 * page is still in use (ring full). */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh "big page" and DMA-map it once;
			 * fragments carve it up below. */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the same page: take an extra ref */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the fragment's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag owns the full-page DMA unmap address */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Tell HW how many new buffers were posted */
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1894
5fb379ee 1895static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1896{
6b7c5b94
SP
1897 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1898
1899 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1900 return NULL;
1901
f3eb62d2 1902 rmb();
6b7c5b94
SP
1903 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1904
1905 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1906
1907 queue_tail_inc(tx_cq);
1908 return txcp;
1909}
1910
3c8def97 1911static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1912 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1913{
3c8def97 1914 struct be_queue_info *txq = &txo->q;
a73b796e 1915 struct be_eth_wrb *wrb;
3c8def97 1916 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1917 struct sk_buff *sent_skb;
ec43b1a6
SP
1918 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1919 bool unmap_skb_hdr = true;
6b7c5b94 1920
ec43b1a6 1921 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1922 BUG_ON(!sent_skb);
ec43b1a6
SP
1923 sent_skbs[txq->tail] = NULL;
1924
1925 /* skip header wrb */
a73b796e 1926 queue_tail_inc(txq);
6b7c5b94 1927
ec43b1a6 1928 do {
6b7c5b94 1929 cur_index = txq->tail;
a73b796e 1930 wrb = queue_tail_node(txq);
2b7bcebf
IV
1931 unmap_tx_frag(&adapter->pdev->dev, wrb,
1932 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1933 unmap_skb_hdr = false;
1934
6b7c5b94
SP
1935 num_wrbs++;
1936 queue_tail_inc(txq);
ec43b1a6 1937 } while (cur_index != last_index);
6b7c5b94 1938
d8ec2c02 1939 dev_kfree_skb_any(sent_skb);
4d586b82 1940 return num_wrbs;
6b7c5b94
SP
1941}
1942
10ef9ab4
SP
/* Return the number of events in the event queue.
 * Each pending entry is acknowledged (evt cleared) and the EQ tail advanced;
 * the caller is expected to notify the EQ with the returned count.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the evt check before clearing/consuming the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
1962
10ef9ab4
SP
1963/* Leaves the EQ is disarmed state */
1964static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1965{
10ef9ab4 1966 int num = events_get(eqo);
859b1e4e 1967
10ef9ab4 1968 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1969}
1970
10ef9ab4 1971static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1972{
1973 struct be_rx_page_info *page_info;
3abcdeda
SP
1974 struct be_queue_info *rxq = &rxo->q;
1975 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1976 struct be_rx_compl_info *rxcp;
d23e946c
SP
1977 struct be_adapter *adapter = rxo->adapter;
1978 int flush_wait = 0;
6b7c5b94 1979
d23e946c
SP
1980 /* Consume pending rx completions.
1981 * Wait for the flush completion (identified by zero num_rcvd)
1982 * to arrive. Notify CQ even when there are no more CQ entries
1983 * for HW to flush partially coalesced CQ entries.
1984 * In Lancer, there is no need to wait for flush compl.
1985 */
1986 for (;;) {
1987 rxcp = be_rx_compl_get(rxo);
1988 if (rxcp == NULL) {
1989 if (lancer_chip(adapter))
1990 break;
1991
1992 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1993 dev_warn(&adapter->pdev->dev,
1994 "did not receive flush compl\n");
1995 break;
1996 }
1997 be_cq_notify(adapter, rx_cq->id, true, 0);
1998 mdelay(1);
1999 } else {
2000 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2001 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2002 if (rxcp->num_rcvd == 0)
2003 break;
2004 }
6b7c5b94
SP
2005 }
2006
d23e946c
SP
2007 /* After cleanup, leave the CQ in unarmed state */
2008 be_cq_notify(adapter, rx_cq->id, false, 0);
2009
2010 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2011 while (atomic_read(&rxq->used) > 0) {
2012 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2013 put_page(page_info->page);
2014 memset(page_info, 0, sizeof(*page_info));
2015 }
2016 BUG_ON(atomic_read(&rxq->used));
482c9e79 2017 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2018}
2019
0ae57bb3 2020static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2021{
0ae57bb3
SP
2022 struct be_tx_obj *txo;
2023 struct be_queue_info *txq;
a8e9179a 2024 struct be_eth_tx_compl *txcp;
4d586b82 2025 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2026 struct sk_buff *sent_skb;
2027 bool dummy_wrb;
0ae57bb3 2028 int i, pending_txqs;
a8e9179a 2029
1a3d0717 2030 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2031 do {
0ae57bb3
SP
2032 pending_txqs = adapter->num_tx_qs;
2033
2034 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2035 cmpl = 0;
2036 num_wrbs = 0;
0ae57bb3
SP
2037 txq = &txo->q;
2038 while ((txcp = be_tx_compl_get(&txo->cq))) {
2039 end_idx =
2040 AMAP_GET_BITS(struct amap_eth_tx_compl,
2041 wrb_index, txcp);
2042 num_wrbs += be_tx_compl_process(adapter, txo,
2043 end_idx);
2044 cmpl++;
2045 }
2046 if (cmpl) {
2047 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2048 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2049 timeo = 0;
0ae57bb3
SP
2050 }
2051 if (atomic_read(&txq->used) == 0)
2052 pending_txqs--;
a8e9179a
SP
2053 }
2054
1a3d0717 2055 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2056 break;
2057
2058 mdelay(1);
2059 } while (true);
2060
0ae57bb3
SP
2061 for_all_tx_queues(adapter, txo, i) {
2062 txq = &txo->q;
2063 if (atomic_read(&txq->used))
2064 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2065 atomic_read(&txq->used));
2066
2067 /* free posted tx for which compls will never arrive */
2068 while (atomic_read(&txq->used)) {
2069 sent_skb = txo->sent_skb_list[txq->tail];
2070 end_idx = txq->tail;
2071 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2072 &dummy_wrb);
2073 index_adv(&end_idx, num_wrbs - 1, txq->len);
2074 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2075 atomic_sub(num_wrbs, &txq->used);
2076 }
b03388d6 2077 }
6b7c5b94
SP
2078}
2079
10ef9ab4
SP
/* Tear down every event queue: drain it, destroy it in FW, detach its NAPI
 * context and free its host memory. Safe to call for EQs that were never
 * created (only be_queue_free() runs then).
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2095
/* Allocate and create the event queues (one per vector, capped by the
 * configured queue count), registering a NAPI context and default adaptive
 * interrupt-coalescing settings for each. Returns 0 or a negative errno;
 * on failure the caller is expected to run the destroy path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* adaptive EQ-delay starts enabled with the max cap */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2129
5fb379ee
SP
2130static void be_mcc_queues_destroy(struct be_adapter *adapter)
2131{
2132 struct be_queue_info *q;
5fb379ee 2133
8788fdc2 2134 q = &adapter->mcc_obj.q;
5fb379ee 2135 if (q->created)
8788fdc2 2136 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2137 be_queue_free(adapter, q);
2138
8788fdc2 2139 q = &adapter->mcc_obj.cq;
5fb379ee 2140 if (q->created)
8788fdc2 2141 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2142 be_queue_free(adapter, q);
2143}
2144
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and WRB queue, unwinding partial work via
 * the goto ladder on any failure. Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* error unwind: release resources in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2177
6b7c5b94
SP
/* Destroy every TX WRB queue and its completion queue (WRB queue first),
 * then free the host memory backing each.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2196
7707133c 2197static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2198{
10ef9ab4 2199 struct be_queue_info *cq, *eq;
3c8def97 2200 struct be_tx_obj *txo;
92bf14ab 2201 int status, i;
6b7c5b94 2202
92bf14ab 2203 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2204
10ef9ab4
SP
2205 for_all_tx_queues(adapter, txo, i) {
2206 cq = &txo->cq;
2207 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2208 sizeof(struct be_eth_tx_compl));
2209 if (status)
2210 return status;
3c8def97 2211
827da44c
JS
2212 u64_stats_init(&txo->stats.sync);
2213 u64_stats_init(&txo->stats.sync_compl);
2214
10ef9ab4
SP
2215 /* If num_evt_qs is less than num_tx_qs, then more than
2216 * one txq share an eq
2217 */
2218 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2219 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2220 if (status)
2221 return status;
6b7c5b94 2222
10ef9ab4
SP
2223 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2224 sizeof(struct be_eth_wrb));
2225 if (status)
2226 return status;
6b7c5b94 2227
94d73aaa 2228 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2229 if (status)
2230 return status;
3c8def97 2231 }
6b7c5b94 2232
d379142b
SP
2233 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2234 adapter->num_tx_qs);
10ef9ab4 2235 return 0;
6b7c5b94
SP
2236}
2237
10ef9ab4 2238static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2239{
2240 struct be_queue_info *q;
3abcdeda
SP
2241 struct be_rx_obj *rxo;
2242 int i;
2243
2244 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2245 q = &rxo->cq;
2246 if (q->created)
2247 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248 be_queue_free(adapter, q);
ac6a0c4a
SP
2249 }
2250}
2251
10ef9ab4 2252static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2253{
10ef9ab4 2254 struct be_queue_info *eq, *cq;
3abcdeda
SP
2255 struct be_rx_obj *rxo;
2256 int rc, i;
6b7c5b94 2257
92bf14ab
SP
2258 /* We can create as many RSS rings as there are EQs. */
2259 adapter->num_rx_qs = adapter->num_evt_qs;
2260
2261 /* We'll use RSS only if atleast 2 RSS rings are supported.
2262 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2263 */
92bf14ab
SP
2264 if (adapter->num_rx_qs > 1)
2265 adapter->num_rx_qs++;
2266
6b7c5b94 2267 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2268 for_all_rx_queues(adapter, rxo, i) {
2269 rxo->adapter = adapter;
3abcdeda
SP
2270 cq = &rxo->cq;
2271 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2272 sizeof(struct be_eth_rx_compl));
3abcdeda 2273 if (rc)
10ef9ab4 2274 return rc;
3abcdeda 2275
827da44c 2276 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2277 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2278 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2279 if (rc)
10ef9ab4 2280 return rc;
3abcdeda 2281 }
6b7c5b94 2282
d379142b
SP
2283 dev_info(&adapter->pdev->dev,
2284 "created %d RSS queue(s) and 1 default RX queue\n",
2285 adapter->num_rx_qs - 1);
10ef9ab4 2286 return 0;
b628bde2
SP
2287}
2288
6b7c5b94
SP
/* Legacy INTx interrupt handler (used only when MSI-X is unavailable; INTx
 * routes everything through EQ0). Counts and acknowledges pending events,
 * then hands processing to NAPI.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2320
10ef9ab4 2321static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2322{
10ef9ab4 2323 struct be_eq_obj *eqo = dev;
6b7c5b94 2324
0b545a62
SP
2325 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2326 napi_schedule(&eqo->napi);
6b7c5b94
SP
2327 return IRQ_HANDLED;
2328}
2329
2e588f84 2330static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2331{
e38b1706 2332 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2333}
2334
10ef9ab4 2335static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2336 int budget, int polling)
6b7c5b94 2337{
3abcdeda
SP
2338 struct be_adapter *adapter = rxo->adapter;
2339 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2340 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2341 u32 work_done;
2342
2343 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2344 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2345 if (!rxcp)
2346 break;
2347
12004ae9
SP
2348 /* Is it a flush compl that has no data */
2349 if (unlikely(rxcp->num_rcvd == 0))
2350 goto loop_continue;
2351
2352 /* Discard compl with partial DMA Lancer B0 */
2353 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2354 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2355 goto loop_continue;
2356 }
2357
2358 /* On BE drop pkts that arrive due to imperfect filtering in
2359 * promiscuous mode on some skews
2360 */
2361 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2362 !lancer_chip(adapter))) {
10ef9ab4 2363 be_rx_compl_discard(rxo, rxcp);
12004ae9 2364 goto loop_continue;
64642811 2365 }
009dd872 2366
6384a4d0
SP
2367 /* Don't do gro when we're busy_polling */
2368 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2369 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2370 else
6384a4d0
SP
2371 be_rx_compl_process(rxo, napi, rxcp);
2372
12004ae9 2373loop_continue:
2e588f84 2374 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2375 }
2376
10ef9ab4
SP
2377 if (work_done) {
2378 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2379
6384a4d0
SP
2380 /* When an rx-obj gets into post_starved state, just
2381 * let be_worker do the posting.
2382 */
2383 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2384 !rxo->rx_post_starved)
10ef9ab4 2385 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2386 }
10ef9ab4 2387
6b7c5b94
SP
2388 return work_done;
2389}
2390
10ef9ab4
SP
2391static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2392 int budget, int idx)
6b7c5b94 2393{
6b7c5b94 2394 struct be_eth_tx_compl *txcp;
10ef9ab4 2395 int num_wrbs = 0, work_done;
3c8def97 2396
10ef9ab4
SP
2397 for (work_done = 0; work_done < budget; work_done++) {
2398 txcp = be_tx_compl_get(&txo->cq);
2399 if (!txcp)
2400 break;
2401 num_wrbs += be_tx_compl_process(adapter, txo,
748b539a
SP
2402 AMAP_GET_BITS(struct
2403 amap_eth_tx_compl,
2404 wrb_index, txcp));
10ef9ab4 2405 }
6b7c5b94 2406
10ef9ab4
SP
2407 if (work_done) {
2408 be_cq_notify(adapter, txo->cq.id, true, work_done);
2409 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2410
10ef9ab4
SP
2411 /* As Tx wrbs have been freed up, wake up netdev queue
2412 * if it was stopped due to lack of tx wrbs. */
2413 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2414 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2415 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2416 }
10ef9ab4
SP
2417
2418 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2419 tx_stats(txo)->tx_compl += work_done;
2420 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2421 }
10ef9ab4
SP
2422 return (work_done < budget); /* Done */
2423}
6b7c5b94 2424
68d7bdcb 2425int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2426{
2427 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2428 struct be_adapter *adapter = eqo->adapter;
0b545a62 2429 int max_work = 0, work, i, num_evts;
6384a4d0 2430 struct be_rx_obj *rxo;
10ef9ab4 2431 bool tx_done;
f31e50a8 2432
0b545a62
SP
2433 num_evts = events_get(eqo);
2434
10ef9ab4
SP
2435 /* Process all TXQs serviced by this EQ */
2436 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2437 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2438 eqo->tx_budget, i);
2439 if (!tx_done)
2440 max_work = budget;
f31e50a8
SP
2441 }
2442
6384a4d0
SP
2443 if (be_lock_napi(eqo)) {
2444 /* This loop will iterate twice for EQ0 in which
2445 * completions of the last RXQ (default one) are also processed
2446 * For other EQs the loop iterates only once
2447 */
2448 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2449 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2450 max_work = max(work, max_work);
2451 }
2452 be_unlock_napi(eqo);
2453 } else {
2454 max_work = budget;
10ef9ab4 2455 }
6b7c5b94 2456
10ef9ab4
SP
2457 if (is_mcc_eqo(eqo))
2458 be_process_mcc(adapter);
93c86700 2459
10ef9ab4
SP
2460 if (max_work < budget) {
2461 napi_complete(napi);
0b545a62 2462 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2463 } else {
2464 /* As we'll continue in polling mode, count and clear events */
0b545a62 2465 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2466 }
10ef9ab4 2467 return max_work;
6b7c5b94
SP
2468}
2469
6384a4d0
SP
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: opportunistically poll a small batch (4) of RX
 * completions from the EQ's rings without GRO. Returns the work done, or
 * LL_FLUSH_BUSY when NAPI currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2491
f67ef7ba 2492void be_detect_error(struct be_adapter *adapter)
7c185276 2493{
e1cfb67a
PR
2494 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2495 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2496 u32 i;
eb0eecc1
SK
2497 bool error_detected = false;
2498 struct device *dev = &adapter->pdev->dev;
2499 struct net_device *netdev = adapter->netdev;
7c185276 2500
d23e946c 2501 if (be_hw_error(adapter))
72f02485
SP
2502 return;
2503
e1cfb67a
PR
2504 if (lancer_chip(adapter)) {
2505 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2506 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2507 sliport_err1 = ioread32(adapter->db +
748b539a 2508 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2509 sliport_err2 = ioread32(adapter->db +
748b539a 2510 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2511 adapter->hw_error = true;
2512 /* Do not log error messages if its a FW reset */
2513 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2514 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2515 dev_info(dev, "Firmware update in progress\n");
2516 } else {
2517 error_detected = true;
2518 dev_err(dev, "Error detected in the card\n");
2519 dev_err(dev, "ERR: sliport status 0x%x\n",
2520 sliport_status);
2521 dev_err(dev, "ERR: sliport error1 0x%x\n",
2522 sliport_err1);
2523 dev_err(dev, "ERR: sliport error2 0x%x\n",
2524 sliport_err2);
2525 }
e1cfb67a
PR
2526 }
2527 } else {
2528 pci_read_config_dword(adapter->pdev,
748b539a 2529 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2530 pci_read_config_dword(adapter->pdev,
748b539a 2531 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2532 pci_read_config_dword(adapter->pdev,
748b539a 2533 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2534 pci_read_config_dword(adapter->pdev,
748b539a 2535 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2536
f67ef7ba
PR
2537 ue_lo = (ue_lo & ~ue_lo_mask);
2538 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2539
eb0eecc1
SK
2540 /* On certain platforms BE hardware can indicate spurious UEs.
2541 * Allow HW to stop working completely in case of a real UE.
2542 * Hence not setting the hw_error for UE detection.
2543 */
f67ef7ba 2544
eb0eecc1
SK
2545 if (ue_lo || ue_hi) {
2546 error_detected = true;
2547 dev_err(dev,
2548 "Unrecoverable Error detected in the adapter");
2549 dev_err(dev, "Please reboot server to recover");
2550 if (skyhawk_chip(adapter))
2551 adapter->hw_error = true;
2552 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2553 if (ue_lo & 1)
2554 dev_err(dev, "UE: %s bit set\n",
2555 ue_status_low_desc[i]);
2556 }
2557 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2558 if (ue_hi & 1)
2559 dev_err(dev, "UE: %s bit set\n",
2560 ue_status_hi_desc[i]);
2561 }
7c185276
AK
2562 }
2563 }
eb0eecc1
SK
2564 if (error_detected)
2565 netif_carrier_off(netdev);
7c185276
AK
2566}
2567
8d56ff11
SP
2568static void be_msix_disable(struct be_adapter *adapter)
2569{
ac6a0c4a 2570 if (msix_enabled(adapter)) {
8d56ff11 2571 pci_disable_msix(adapter->pdev);
ac6a0c4a 2572 adapter->num_msix_vec = 0;
68d7bdcb 2573 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2574 }
2575}
2576
c2bba3df 2577static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2578{
7dc4c064 2579 int i, num_vec;
d379142b 2580 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2581
92bf14ab
SP
2582 /* If RoCE is supported, program the max number of NIC vectors that
2583 * may be configured via set-channels, along with vectors needed for
2584 * RoCe. Else, just program the number we'll use initially.
2585 */
2586 if (be_roce_supported(adapter))
2587 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2588 2 * num_online_cpus());
2589 else
2590 num_vec = adapter->cfg_num_qs;
3abcdeda 2591
ac6a0c4a 2592 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2593 adapter->msix_entries[i].entry = i;
2594
7dc4c064
AG
2595 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2596 MIN_MSIX_VECTORS, num_vec);
2597 if (num_vec < 0)
2598 goto fail;
92bf14ab 2599
92bf14ab
SP
2600 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2601 adapter->num_msix_roce_vec = num_vec / 2;
2602 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2603 adapter->num_msix_roce_vec);
2604 }
2605
2606 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2607
2608 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2609 adapter->num_msix_vec);
c2bba3df 2610 return 0;
7dc4c064
AG
2611
2612fail:
2613 dev_warn(dev, "MSIx enable failed\n");
2614
2615 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2616 if (!be_physfn(adapter))
2617 return num_vec;
2618 return 0;
6b7c5b94
SP
2619}
2620
fe6d2a38 2621static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2622 struct be_eq_obj *eqo)
b628bde2 2623{
f2f781a7 2624 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2625}
6b7c5b94 2626
b628bde2
SP
2627static int be_msix_register(struct be_adapter *adapter)
2628{
10ef9ab4
SP
2629 struct net_device *netdev = adapter->netdev;
2630 struct be_eq_obj *eqo;
2631 int status, i, vec;
6b7c5b94 2632
10ef9ab4
SP
2633 for_all_evt_queues(adapter, eqo, i) {
2634 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2635 vec = be_msix_vec_get(adapter, eqo);
2636 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2637 if (status)
2638 goto err_msix;
2639 }
b628bde2 2640
6b7c5b94 2641 return 0;
3abcdeda 2642err_msix:
10ef9ab4
SP
2643 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2644 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2645 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2646 status);
ac6a0c4a 2647 be_msix_disable(adapter);
6b7c5b94
SP
2648 return status;
2649}
2650
/* Register the adapter's interrupt handler(s): MSI-X when enabled, falling
 * back to a shared INTx line for PFs (VFs cannot use INTx). Sets
 * isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2678
/* Free whichever IRQs be_irq_register() installed (INTx or one per EQ) and
 * clear isr_registered. Safe to call when nothing was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}
2701
10ef9ab4 2702static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2703{
2704 struct be_queue_info *q;
2705 struct be_rx_obj *rxo;
2706 int i;
2707
2708 for_all_rx_queues(adapter, rxo, i) {
2709 q = &rxo->q;
2710 if (q->created) {
2711 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2712 be_rx_cq_clean(rxo);
482c9e79 2713 }
10ef9ab4 2714 be_queue_free(adapter, q);
482c9e79
SP
2715 }
2716}
2717
889cd4b2
SP
/* ndo_stop handler.  Teardown order is deliberate: disable NAPI/busy-poll,
 * stop async MCC events, quiesce TX and reap its completions, destroy RX
 * queues, drop extra unicast MACs, sync + drain each EQ, and finally
 * unregister interrupts.  Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Index 0 is the primary MAC; delete only the extra uc entries */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2767
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table and hash key, and post the initial receive buffers.  Returns 0 or
 * a negative/FW error; callers are expected to clean up via be_close().
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
			j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
2833
6b7c5b94
SP
/* ndo_open handler.  Creates RX queues, registers IRQs, arms all CQs/EQs,
 * enables NAPI and async MCC events, queries link state, and starts the TX
 * queues.  Any failure funnels through be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm completion notifications for all RX and TX CQs */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Ask the stack to replay known VXLAN ports so offloads can be set */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2883
71d8d1b5
AK
2884static int be_setup_wol(struct be_adapter *adapter, bool enable)
2885{
2886 struct be_dma_mem cmd;
2887 int status = 0;
2888 u8 mac[ETH_ALEN];
2889
2890 memset(mac, 0, ETH_ALEN);
2891
2892 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2893 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2894 GFP_KERNEL);
71d8d1b5
AK
2895 if (cmd.va == NULL)
2896 return -1;
71d8d1b5
AK
2897
2898 if (enable) {
2899 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2900 PCICFG_PM_CONTROL_OFFSET,
2901 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2902 if (status) {
2903 dev_err(&adapter->pdev->dev,
2381a55c 2904 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2905 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2906 cmd.dma);
71d8d1b5
AK
2907 return status;
2908 }
2909 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2910 adapter->netdev->dev_addr,
2911 &cmd);
71d8d1b5
AK
2912 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2913 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2914 } else {
2915 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2916 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2917 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2918 }
2919
2b7bcebf 2920 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2921 return status;
2922}
2923
6d87f5c3
AK
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the last command status; a failure for one VF is logged but
 * does not stop assignment for the remaining VFs.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a pmac entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed + 1 (NOTE: no carry into mac[4]) */
		mac[5] += 1;
	}
	return status;
}
2959
4c876616
SP
/* Read back each VF's currently-active MAC from FW into vf_cfg->mac_addr.
 * Used when VFs were already enabled before this driver loaded, so the PF
 * cache matches what the hardware is actually using.  Stops at the first
 * query failure.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
2976
/* Tear down SR-IOV: disable SR-IOV at the PCI level, then delete each VF's
 * MAC and interface in FW.  If any VF is still assigned to a VM, skip FW
 * teardown entirely (only release the host-side bookkeeping), since the
 * VM still owns the function.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->vf_cfg = NULL;	/* NOTE(review): original leaves this dangling; kfree'd above */
	adapter->num_vfs = 0;
}
3004
7707133c
SP
/* Destroy all queues in reverse order of creation: MCC, RX CQs, TX, EQs. */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3012
/* Synchronously cancel the periodic worker, if scheduled, and clear the
 * flag so be_schedule_worker() can re-arm it later.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
3020
/* Delete the primary MAC (index 0) and every extra unicast MAC from FW,
 * then free the pmac_id table.  Safe to call repeatedly: pmac_id is
 * NULLed so a second call is a no-op.
 */
static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}
3035
#ifdef CONFIG_BE2NET_VXLAN
/* Revert the interface from VXLAN-tunnel mode to normal mode and clear the
 * FW's notion of the VXLAN UDP port, then reset the driver-side state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
c9c47142 3050
b05004ad
SK
/* Full teardown counterpart of be_setup(): stop the worker, clear SR-IOV,
 * VXLAN offloads and MACs, destroy the interface and all queues, and
 * release MSI-X.  Clears BE_FLAGS_SETUP_DONE so be_close() becomes a no-op
 * afterwards.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3072
/* Create a FW interface for every VF.  Capability flags start at the basic
 * untagged/broadcast/multicast set; on non-BE3 chips they are replaced by
 * the per-VF FW profile limits when available.  Stops at the first
 * creation failure.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status =
		    be_cmd_if_create(adapter, cap_flags, en_flags,
				     &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}
3104
/* Allocate the per-VF config array and mark every entry's FW handles as
 * "not yet created" (-1).  Returns -ENOMEM on allocation failure.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3121
f9449ab7
SP
/* Bring up SR-IOV VFs.  Two paths: if VFs were already enabled (e.g. by a
 * previous driver instance) reuse them and query their existing if-handles
 * and MACs; otherwise create interfaces, assign MACs, and finally call
 * pci_enable_sriov().  Each VF is also granted filter-management
 * privilege, given full TX bandwidth on BE3, and its link state cached.
 * On any failure everything is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;
	u16 lnk_speed;

	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter))
			dev_info(dev, "Device supports %d VFs and not %d\n",
				 be_max_vfs(adapter), num_vfs);
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
		if (!adapter->num_vfs)
			return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* Pre-existing VFs: discover their FW interface handles */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;
	}

	if (old_vfs) {
		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* BE3 FW, by default, caps VF TX-rate to 100mbps.
		 * Allow full available bandwidth
		 */
		if (BE3_chip(adapter) && !old_vfs)
			be_cmd_config_qos(adapter, 1000, vf + 1);

		status = be_cmd_link_status_query(adapter, &lnk_speed,
						  NULL, vf + 1);
		if (!status)
			vf_cfg->tx_rate = lnk_speed;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3217
f93f160b
VV
3218/* Converting function_mode bits on BE3 to SH mc_type enums */
3219
3220static u8 be_convert_mc_type(u32 function_mode)
3221{
3222 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3223 return vNIC1;
3224 else if (function_mode & FLEX10_MODE)
3225 return FLEX10;
3226 else if (function_mode & VNIC_MODE)
3227 return vNIC2;
3228 else if (function_mode & UMC_ENABLED)
3229 return UMC;
3230 else
3231 return MC_NONE;
3232}
3233
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits, so derive them in
 * the driver from chip type, SR-IOV state, multi-channel mode and the
 * function capabilities, filling in @res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	struct pci_dev *pdev = adapter->pdev;
	bool use_sriov = false;
	int max_vfs = 0;

	if (be_physfn(adapter) && BE3_chip(adapter)) {
		be_cmd_get_profile_config(adapter, res, 0);
		/* Some old versions of BE3 FW don't report max_vfs value */
		if (res->max_vfs == 0) {
			max_vfs = pci_sriov_get_totalvfs(pdev);
			res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
		}
		use_sriov = res->max_vfs && sriov_want(adapter);
	}

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	/* With SR-IOV the PF must leave EQs for the VFs */
	if (be_physfn(adapter))
		res->max_evt_qs = (res->max_vfs > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3303
30128031
SP
/* Reset adapter soft-state to pre-setup defaults before (re)configuring.
 * PFs start with maximum command privileges; VFs with the minimum.
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
3316
/* Populate adapter->res with per-function resource limits: computed
 * locally for BEx chips, queried from FW (GET_FUNC_CONFIG /
 * GET_PROFILE_CONFIG) for Lancer/Skyhawk.  Half the EQs are reserved for
 * RoCE when that feature is supported.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		if (be_physfn(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, 0);
			if (status)
				return status;
			adapter->res.max_vfs = res.max_vfs;
		}

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3360
39f1d94d
SP
/* Routine to query per function resource limits.  Also reads the FW
 * config (port number, function mode/caps, ASIC rev), logs the active FW
 * profile on PFs, allocates the pmac_id table sized to the uc-mac limit,
 * and clamps cfg_num_qs to what HW supports.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps,
				     &adapter->asic_rev);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3395
95046b92
SP
/* Establish the primary MAC.  If the netdev has no address yet, read the
 * permanent MAC from FW; otherwise re-program the existing dev_addr (the
 * HW may have been reset).  On BE3-R VFs the PF owns the initial MAC, so
 * the pmac_add is skipped there.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}
3419
68d7bdcb
SP
/* Arm the 1-second periodic worker and record it via the flag that
 * be_cancel_worker() checks.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3425
/* Create all queue objects in dependency order (EQs, TX, RX CQs, MCC) and
 * publish the real RX/TX queue counts to the networking core.  Callers
 * must hold rtnl_lock for the netif_set_real_num_*_queues() calls.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
3460
68d7bdcb
SP
/* Re-create all queues with the current config (e.g. after a channel-count
 * change): close the netdev if running, stop the worker, destroy queues,
 * re-program MSI-X (unless vectors are shared with RoCE, in which case the
 * MSI-X table must not be touched), rebuild queues and reopen.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3496
7707133c
SP
/* Main adapter bring-up: query config, enable MSI-X, create the FW
 * interface and all queues, program MAC/VLAN/rx-mode, sync flow control,
 * optionally set up SR-IOV, and start the periodic worker.  Sets
 * BE_FLAGS_SETUP_DONE on success; any failure unwinds via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	/* Never enable flags the interface does not actually support */
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);

	/* Old BE2 FW (< 4.0) has known interrupt delivery problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-assert the driver's flow-control settings if FW differs */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (sriov_want(adapter)) {
		if (be_max_vfs(adapter))
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3582
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled-"interrupt" path (netconsole/kgdboe): with normal interrupts
 * unavailable, manually notify every event queue and schedule its NAPI
 * context so pending RX/TX completions get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3598
84517482 3599#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
4188e7df 3600static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
c165541e 3601
fa9a6fed 3602static bool be_flash_redboot(struct be_adapter *adapter,
748b539a
SP
3603 const u8 *p, u32 img_start, int image_size,
3604 int hdr_size)
fa9a6fed
SB
3605{
3606 u32 crc_offset;
3607 u8 flashed_crc[4];
3608 int status;
3f0d4560
AK
3609
3610 crc_offset = hdr_size + img_start + image_size - 4;
3611
fa9a6fed 3612 p += crc_offset;
3f0d4560 3613
748b539a 3614 status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
fa9a6fed
SB
3615 if (status) {
3616 dev_err(&adapter->pdev->dev,
748b539a 3617 "could not get crc from flash, not flashing redboot\n");
fa9a6fed
SB
3618 return false;
3619 }
3620
3621 /*update redboot only if crc does not match*/
3622 if (!memcmp(flashed_crc, p, 4))
3623 return false;
3624 else
3625 return true;
fa9a6fed
SB
3626}
3627
306f1348
SP
3628static bool phy_flashing_required(struct be_adapter *adapter)
3629{
42f11cf2
AK
3630 return (adapter->phy.phy_type == TN_8022 &&
3631 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3632}
3633
c165541e
PR
/* Check whether a flash component of @type is listed in the UFI section
 * table.  BE2 uses the older gen2 entry layout, so the table is
 * re-interpreted accordingly before scanning all MAX_FLASH_COMP entries.
 */
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;

}
3655
/* Locate the flash section info inside a firmware image by scanning past
 * the header in 32-byte steps for the flash cookie signature.  Returns a
 * pointer into fw->data, or NULL if no section header is found.
 */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
3672
/* Stream @img_size bytes of image data to the flash ROM in chunks of at
 * most 32KB through the pre-allocated DMA buffer @flash_cmd.  Intermediate
 * chunks use the SAVE op; the final chunk uses the FLASH (commit) op.  For
 * PHY firmware an ILLEGAL_IOCTL_REQ from FW is treated as "no PHY FW
 * support" and silently aborts.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
3713
/* For BE2, BE3 and BE3-R */
/* Flash every recognized component of a BE2/BE3 UFI image.
 * The per-generation tables below map each component's flash offset,
 * operation type, maximum size, and UFI image type.
 * Returns 0 on success, -1 on a corrupt UFI, or a flash command status.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	/* BE3 and BE3-R share the gen3 layout; BE2 uses gen2 */
	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid Cookie. UFI corrupted ?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		/* Skip components this UFI does not carry */
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW is only flashable on FW versions >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Flash boot code only if the on-flash copy differs */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   pflashcomp[i].offset,
						   pflashcomp[i].size,
						   filehdr_size +
						   img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		/* Bounds-check the component against the firmware blob */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3823
773a2d7c 3824static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
3825 const struct firmware *fw,
3826 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3827{
773a2d7c
PR
3828 int status = 0, i, filehdr_size = 0;
3829 int img_offset, img_size, img_optype, redboot;
3830 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3831 const u8 *p = fw->data;
3832 struct flash_section_info *fsec = NULL;
3833
3834 filehdr_size = sizeof(struct flash_file_hdr_g3);
3835 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3836 if (!fsec) {
3837 dev_err(&adapter->pdev->dev,
3838 "Invalid Cookie. UFI corrupted ?\n");
3839 return -1;
3840 }
3841
3842 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3843 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3844 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3845
3846 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3847 case IMAGE_FIRMWARE_iSCSI:
3848 img_optype = OPTYPE_ISCSI_ACTIVE;
3849 break;
3850 case IMAGE_BOOT_CODE:
3851 img_optype = OPTYPE_REDBOOT;
3852 break;
3853 case IMAGE_OPTION_ROM_ISCSI:
3854 img_optype = OPTYPE_BIOS;
3855 break;
3856 case IMAGE_OPTION_ROM_PXE:
3857 img_optype = OPTYPE_PXE_BIOS;
3858 break;
3859 case IMAGE_OPTION_ROM_FCoE:
3860 img_optype = OPTYPE_FCOE_BIOS;
3861 break;
3862 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3863 img_optype = OPTYPE_ISCSI_BACKUP;
3864 break;
3865 case IMAGE_NCSI:
3866 img_optype = OPTYPE_NCSI_FW;
3867 break;
3868 default:
3869 continue;
3870 }
3871
3872 if (img_optype == OPTYPE_REDBOOT) {
3873 redboot = be_flash_redboot(adapter, fw->data,
748b539a
SP
3874 img_offset, img_size,
3875 filehdr_size +
3876 img_hdrs_size);
773a2d7c
PR
3877 if (!redboot)
3878 continue;
3879 }
3880
3881 p = fw->data;
3882 p += filehdr_size + img_offset + img_hdrs_size;
3883 if (p + img_size > fw->data + fw->size)
3884 return -1;
3885
3886 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3887 if (status) {
3888 dev_err(&adapter->pdev->dev,
3889 "Flashing section type %d failed.\n",
3890 fsec->fsec_entry[i].type);
3891 return status;
3892 }
3893 }
3894 return 0;
3f0d4560
AK
3895}
3896
/* Download a firmware image to a Lancer adapter via the write-object
 * command: stream the image in 32KB chunks, then issue a zero-length
 * write to commit it. If the FW indicates a reset is needed, trigger
 * an adapter reset to activate the new image.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what FW actually accepted, not chunk_size */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3993
ca34fe38
SP
3994#define UFI_TYPE2 2
3995#define UFI_TYPE3 3
0ad3157e 3996#define UFI_TYPE3R 10
ca34fe38
SP
3997#define UFI_TYPE4 4
3998static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3999 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
4000{
4001 if (fhdr == NULL)
4002 goto be_get_ufi_exit;
4003
ca34fe38
SP
4004 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4005 return UFI_TYPE4;
0ad3157e
VV
4006 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4007 if (fhdr->asic_type_rev == 0x10)
4008 return UFI_TYPE3R;
4009 else
4010 return UFI_TYPE3;
4011 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4012 return UFI_TYPE2;
773a2d7c
PR
4013
4014be_get_ufi_exit:
4015 dev_err(&adapter->pdev->dev,
4016 "UFI and Interface are not compatible for flashing\n");
4017 return -1;
4018}
4019
485bf569
SN
/* Download firmware to a BE2/BE3/Skyhawk adapter. Determines the UFI
 * flavor from the file header, then dispatches each image with id 1 to
 * the chip-appropriate flashing routine.
 * Returns 0 on success or a negative value on failure.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only image-id 1 entries are flashable payloads */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy type-2 UFIs carry no image headers; flash directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4088
4089int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4090{
4091 const struct firmware *fw;
4092 int status;
4093
4094 if (!netif_running(adapter->netdev)) {
4095 dev_err(&adapter->pdev->dev,
4096 "Firmware load not allowed (interface is down)\n");
4097 return -1;
4098 }
4099
4100 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4101 if (status)
4102 goto fw_exit;
4103
4104 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4105
4106 if (lancer_chip(adapter))
4107 status = lancer_fw_download(adapter, fw);
4108 else
4109 status = be_fw_download(adapter, fw);
4110
eeb65ced
SK
4111 if (!status)
4112 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4113 adapter->fw_on_flash);
4114
84517482
AK
4115fw_exit:
4116 release_firmware(fw);
4117 return status;
4118}
4119
748b539a 4120static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4121{
4122 struct be_adapter *adapter = netdev_priv(dev);
4123 struct nlattr *attr, *br_spec;
4124 int rem;
4125 int status = 0;
4126 u16 mode = 0;
4127
4128 if (!sriov_enabled(adapter))
4129 return -EOPNOTSUPP;
4130
4131 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4132
4133 nla_for_each_nested(attr, br_spec, rem) {
4134 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4135 continue;
4136
4137 mode = nla_get_u16(attr);
4138 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4139 return -EINVAL;
4140
4141 status = be_cmd_set_hsw_config(adapter, 0, 0,
4142 adapter->if_handle,
4143 mode == BRIDGE_MODE_VEPA ?
4144 PORT_FWD_TYPE_VEPA :
4145 PORT_FWD_TYPE_VEB);
4146 if (status)
4147 goto err;
4148
4149 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4150 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4151
4152 return status;
4153 }
4154err:
4155 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4156 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4157
4158 return status;
4159}
4160
4161static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4162 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4163{
4164 struct be_adapter *adapter = netdev_priv(dev);
4165 int status = 0;
4166 u8 hsw_mode;
4167
4168 if (!sriov_enabled(adapter))
4169 return 0;
4170
4171 /* BE and Lancer chips support VEB mode only */
4172 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4173 hsw_mode = PORT_FWD_TYPE_VEB;
4174 } else {
4175 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4176 adapter->if_handle, &hsw_mode);
4177 if (status)
4178 return 0;
4179 }
4180
4181 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4182 hsw_mode == PORT_FWD_TYPE_VEPA ?
4183 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4184}
4185
#ifdef CONFIG_BE2NET_VXLAN
/* vxlan add-port callback: enable VxLAN offloads for the given UDP port.
 * Only one port can be offloaded at a time, and only on chips that are
 * neither Lancer nor BEx (i.e. Skyhawk-class).
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* HW supports a single offloaded VxLAN port */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	/* Roll back any partially-enabled tunnel state */
	be_disable_vxlan_offloads(adapter);
	return;
}
4227
4228static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4229 __be16 port)
4230{
4231 struct be_adapter *adapter = netdev_priv(netdev);
4232
4233 if (lancer_chip(adapter) || BEx_chip(adapter))
4234 return;
4235
4236 if (adapter->vxlan_port != port)
4237 return;
4238
4239 be_disable_vxlan_offloads(adapter);
4240
4241 dev_info(&adapter->pdev->dev,
4242 "Disabled VxLAN offloads for UDP port %d\n",
4243 be16_to_cpu(port));
4244}
c5abe7c0 4245#endif
c9c47142 4246
/* Net device operations table for the benet driver */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4276
/* Initialize the net_device: advertise offload features, set flags,
 * and hook up the netdev and ethtool ops tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* VxLAN offloads (encap checksum/TSO) only on Skyhawk */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX offload/filtering is always on; not user-toggleable */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4309
4310static void be_unmap_pci_bars(struct be_adapter *adapter)
4311{
c5b3ad4c
SP
4312 if (adapter->csr)
4313 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4314 if (adapter->db)
ce66f781 4315 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4316}
4317
/* Return the PCI BAR number carrying the doorbell registers:
 * BAR 0 for Lancer chips and VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4325
4326static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4327{
dbf0f2a7 4328 if (skyhawk_chip(adapter)) {
ce66f781
SP
4329 adapter->roce_db.size = 4096;
4330 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4331 db_bar(adapter));
4332 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4333 db_bar(adapter));
4334 }
045508a8 4335 return 0;
6b7c5b94
SP
4336}
4337
4338static int be_map_pci_bars(struct be_adapter *adapter)
4339{
4340 u8 __iomem *addr;
fe6d2a38 4341
c5b3ad4c
SP
4342 if (BEx_chip(adapter) && be_physfn(adapter)) {
4343 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4344 if (adapter->csr == NULL)
4345 return -ENOMEM;
4346 }
4347
ce66f781 4348 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
4349 if (addr == NULL)
4350 goto pci_map_err;
ba343c77 4351 adapter->db = addr;
ce66f781
SP
4352
4353 be_roce_map_pci_bars(adapter);
6b7c5b94 4354 return 0;
ce66f781 4355
6b7c5b94
SP
4356pci_map_err:
4357 be_unmap_pci_bars(adapter);
4358 return -ENOMEM;
4359}
4360
6b7c5b94
SP
4361static void be_ctrl_cleanup(struct be_adapter *adapter)
4362{
8788fdc2 4363 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4364
4365 be_unmap_pci_bars(adapter);
4366
4367 if (mem->va)
2b7bcebf
IV
4368 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4369 mem->dma);
e7b909a6 4370
5b8821b7 4371 mem = &adapter->rx_filter;
e7b909a6 4372 if (mem->va)
2b7bcebf
IV
4373 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4374 mem->dma);
6b7c5b94
SP
4375}
4376
6b7c5b94
SP
/* One-time control-path setup: read the SLI interface register, map PCI
 * BARs, allocate the 16-byte-aligned mailbox and the rx-filter DMA
 * buffers, and initialize the command locks.
 * Returns 0 on success or a negative errno, unwinding on failure.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 so the mailbox can be aligned to 16 bytes */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Saved config space is restored after an EEH/FLR reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4435
4436static void be_stats_cleanup(struct be_adapter *adapter)
4437{
3abcdeda 4438 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4439
4440 if (cmd->va)
2b7bcebf
IV
4441 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4442 cmd->va, cmd->dma);
6b7c5b94
SP
4443}
4444
4445static int be_stats_init(struct be_adapter *adapter)
4446{
3abcdeda 4447 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4448
ca34fe38
SP
4449 if (lancer_chip(adapter))
4450 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4451 else if (BE2_chip(adapter))
89a88ab8 4452 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4453 else if (BE3_chip(adapter))
ca34fe38 4454 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4455 else
4456 /* ALL non-BE ASICs */
4457 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4458
ede23fa8
JP
4459 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4460 GFP_KERNEL);
6b7c5b94
SP
4461 if (cmd->va == NULL)
4462 return -1;
4463 return 0;
4464}
4465
/* PCI remove callback: tear down the adapter in the reverse order of
 * be_probe() — RoCE first, then worker cancellation, netdev
 * unregistration, FW quiesce, and finally DMA/PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev disappears */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4496
39f1d94d 4497static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4498{
baaa08d1 4499 int status, level;
6b7c5b94 4500
9e1453c5
AK
4501 status = be_cmd_get_cntl_attributes(adapter);
4502 if (status)
4503 return status;
4504
7aeb2156
PR
4505 /* Must be a power of 2 or else MODULO will BUG_ON */
4506 adapter->be_get_temp_freq = 64;
4507
baaa08d1
VV
4508 if (BEx_chip(adapter)) {
4509 level = be_cmd_get_fw_log_level(adapter);
4510 adapter->msg_enable =
4511 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4512 }
941a77d5 4513
92bf14ab 4514 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4515 return 0;
6b7c5b94
SP
4516}
4517
/* Recover a Lancer adapter after an error: wait for FW readiness, tear
 * down and rebuild the function, and restore the interface state.
 * Returns 0 on success; -EAGAIN means FW resources are still being
 * provisioned and recovery should be retried.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	/* Re-open only if the interface was up before the error */
	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4554
/* Periodic (1s) worker that detects adapter errors and, on Lancer,
 * attempts automatic recovery. Reschedules itself unless recovery
 * failed with a non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while it is being recovered.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4581
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down; otherwise refreshes stats, polls die temperature,
 * replenishes starved RX queues, and updates EQ delays.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Kick off a new stats query only once the previous completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Poll die temperature every be_get_temp_freq iterations (PF only) */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4624
257a3feb 4625/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4626static bool be_reset_required(struct be_adapter *adapter)
4627{
257a3feb 4628 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4629}
4630
d379142b
SP
4631static char *mc_name(struct be_adapter *adapter)
4632{
f93f160b
VV
4633 char *str = ""; /* default */
4634
4635 switch (adapter->mc_type) {
4636 case UMC:
4637 str = "UMC";
4638 break;
4639 case FLEX10:
4640 str = "FLEX10";
4641 break;
4642 case vNIC1:
4643 str = "vNIC-1";
4644 break;
4645 case nPAR:
4646 str = "nPAR";
4647 break;
4648 case UFP:
4649 str = "UFP";
4650 break;
4651 case vNIC2:
4652 str = "vNIC-2";
4653 break;
4654 default:
4655 str = "";
4656 }
4657
4658 return str;
d379142b
SP
4659}
4660
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4665
/* PCI probe callback: bring up a benet adapter — enable the PCI device,
 * allocate the netdev, set the DMA mask, initialize the control path,
 * sync with FW, configure the function, and register the netdev.
 * Failures unwind through the goto ladder in reverse order of setup.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4787
/* Legacy PM suspend handler: quiesce traffic and tear down adapter
 * resources before entering a low-power state; be_resume() rebuilds
 * them on wakeup.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	/* Arm wake-on-LAN in the adapter only if the user enabled it */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Stop the recovery task before dismantling the device state */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4812
4813static int be_resume(struct pci_dev *pdev)
4814{
4815 int status = 0;
4816 struct be_adapter *adapter = pci_get_drvdata(pdev);
4817 struct net_device *netdev = adapter->netdev;
4818
4819 netif_device_detach(netdev);
4820
4821 status = pci_enable_device(pdev);
4822 if (status)
4823 return status;
4824
1ca01512 4825 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4826 pci_restore_state(pdev);
4827
dd5746bf
SB
4828 status = be_fw_wait_ready(adapter);
4829 if (status)
4830 return status;
4831
d4360d6f 4832 be_intr_set(adapter, true);
2243e2e9
SP
4833 /* tell fw we're ready to fire cmds */
4834 status = be_cmd_fw_init(adapter);
4835 if (status)
4836 return status;
4837
9b0365f1 4838 be_setup(adapter);
6b7c5b94
SP
4839 if (netif_running(netdev)) {
4840 rtnl_lock();
4841 be_open(netdev);
4842 rtnl_unlock();
4843 }
f67ef7ba
PR
4844
4845 schedule_delayed_work(&adapter->func_recovery_work,
4846 msecs_to_jiffies(1000));
6b7c5b94 4847 netif_device_attach(netdev);
71d8d1b5 4848
76a9e08e 4849 if (adapter->wol_en)
71d8d1b5 4850 be_setup_wol(adapter, false);
a4ca055f 4851
6b7c5b94
SP
4852 return 0;
4853}
4854
82456b03
SP
/*
 * Shutdown handler (reboot/poweroff).  An FLR will stop BE from DMAing
 * any data, so a function reset is issued before the PCI device is
 * disabled.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set up */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
4874
/* EEH/AER callback: a PCI channel error was detected.  Quiesce the
 * driver and tell the PCI core whether a slot reset may recover the
 * device (NEED_RESET) or the device is lost (DISCONNECT).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once; this callback may be invoked again
	 * before recovery completes.
	 */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
4913
/* EEH/AER slot-reset callback: the slot has been reset; re-enable the
 * device, restore its config space, and wait for firmware to come back.
 * Returns RECOVERED so the core proceeds to be_eeh_resume(), or
 * DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
4940
/* EEH/AER resume callback: the slot reset succeeded; rebuild driver
 * state and restart traffic.  On failure we can only log — there is no
 * further recovery the PCI core will attempt from this callback.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic recovery task cancelled on error detect */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
4977
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
4983
6b7c5b94
SP
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * error-recovery callbacks for all supported BE/Lancer device ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
4994
4995static int __init be_init_module(void)
4996{
8e95a202
JP
4997 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4998 rx_frag_size != 2048) {
6b7c5b94
SP
4999 printk(KERN_WARNING DRV_NAME
5000 " : Module param rx_frag_size must be 2048/4096/8192."
5001 " Using 2048\n");
5002 rx_frag_size = 2048;
5003 }
6b7c5b94
SP
5004
5005 return pci_register_driver(&be_driver);
5006}
5007module_init(be_init_module);
5008
/* Module exit: unregister the PCI driver; per-device teardown happens
 * through the driver's .remove callback.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);