drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

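/* Illustrative usage (assumed, not part of this file): both options are
 * read-only (S_IRUGO), so they can only be set at module load time, e.g.
 *	modprobe be2net num_vfs=4 rx_frag_size=4096
 */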
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

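/* Illustrative example of the doorbell encoding above: re-arming CQ 5
 * after reaping 3 completions builds one 32-bit word, roughly
 *	val = (5 & DB_CQ_RING_ID_MASK) |
 *	      (1 << DB_CQ_REARM_SHIFT) |
 *	      (3 << DB_CQ_NUM_POPPED_SHIFT);
 * and a single iowrite32() both acks the popped entries and re-arms the
 * CQ's interrupt.
 */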
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed only if the user-provided MAC is different from the
	 * active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

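/* Worked example of the wrap handling above: with *acc == 0x0001fffe
 * (131070 accumulated) and a new 16-bit HW reading val == 3, val <
 * lo(*acc) flags a wrap, so newacc = 0x00010000 + 3 + 65536 =
 * 0x00020003 (131075): the 5 events counted across the 0xfffe -> 0x0003
 * wrap are preserved.
 */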
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
				     rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
			   drvs->rx_alignment_symbol_errors +
			   drvs->rx_in_range_errors +
			   drvs->rx_out_range_errors +
			   drvs->rx_frame_too_long +
			   drvs->rx_dropped_too_small +
			   drvs->rx_dropped_too_short +
			   drvs->rx_dropped_header_too_small +
			   drvs->rx_dropped_tcp_length +
			   drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
				  drvs->rx_out_range_errors +
				  drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
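
/* Example of the count above: a linear head plus two page frags needs
 * 1 + 2 WRBs, plus one header WRB = 4 (even, no dummy). With a single
 * frag the total would be 3, so on BE2/BE3 (not Lancer) a dummy WRB is
 * appended to keep the count even.
 */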

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

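/* E.g. a frag mapped at DMA address 0x123456780 with len 1500 yields
 * frag_pa_hi = 0x1, frag_pa_lo = 0x23456780 and frag_len = 1500
 * (masked with ETH_WRB_FRAG_LEN_MASK).
 */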
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

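/* Illustration: for vlan_tag 0xa00a (PCP 5, VID 10), if bit 5 of
 * adapter->vlan_prio_bmap is clear the PCP bits are replaced with
 * adapter->recommended_prio, while VID 10 is left untouched.
 */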
/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;
		skb->len = 36;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which will wake up the
		 * queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
		    txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

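/* Worked example of the adaptive EQ-delay math above: at a combined
 * 50000 pkts/s, eqd = (50000 / 15000) << 2 = 12. Results below 8 are
 * forced to 0 (no delay), the value is clamped to [min_eqd, max_eqd],
 * and the firmware is programmed with delay_multiplier = eqd * 65 / 100.
 */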
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

1578/*
1579 * skb_fill_rx_data forms a complete skb for an ether frame
1580 * indicated by rxcp.
1581 */
10ef9ab4
SP
1582static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1583 struct be_rx_compl_info *rxcp)
6b7c5b94 1584{
6b7c5b94 1585 struct be_rx_page_info *page_info;
2e588f84
SP
1586 u16 i, j;
1587 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1588 u8 *start;
6b7c5b94 1589
0b0ef1d0 1590 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1591 start = page_address(page_info->page) + page_info->page_offset;
1592 prefetch(start);
1593
1594 /* Copy data in the first descriptor of this completion */
2e588f84 1595 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1596
6b7c5b94
SP
1597 skb->len = curr_frag_len;
1598 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1599 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1600 /* Complete packet has now been moved to data */
1601 put_page(page_info->page);
1602 skb->data_len = 0;
1603 skb->tail += curr_frag_len;
1604 } else {
ac1ae5f3
ED
1605 hdr_len = ETH_HLEN;
1606 memcpy(skb->data, start, hdr_len);
6b7c5b94 1607 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1608 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1609 skb_shinfo(skb)->frags[0].page_offset =
1610 page_info->page_offset + hdr_len;
748b539a
SP
1611 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1612 curr_frag_len - hdr_len);
6b7c5b94 1613 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1614 skb->truesize += rx_frag_size;
6b7c5b94
SP
1615 skb->tail += hdr_len;
1616 }
205859a2 1617 page_info->page = NULL;
6b7c5b94 1618
2e588f84
SP
1619 if (rxcp->pkt_size <= rx_frag_size) {
1620 BUG_ON(rxcp->num_rcvd != 1);
1621 return;
6b7c5b94
SP
1622 }
1623
1624 /* More frags present for this completion */
2e588f84
SP
1625 remaining = rxcp->pkt_size - curr_frag_len;
1626 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1627 page_info = get_rx_page_info(rxo);
2e588f84 1628 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1629
bd46cb6c
AK
1630 /* Coalesce all frags from the same physical page in one slot */
1631 if (page_info->page_offset == 0) {
1632 /* Fresh page */
1633 j++;
b061b39e 1634 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1635 skb_shinfo(skb)->frags[j].page_offset =
1636 page_info->page_offset;
9e903e08 1637 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1638 skb_shinfo(skb)->nr_frags++;
1639 } else {
1640 put_page(page_info->page);
1641 }
1642
9e903e08 1643 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1644 skb->len += curr_frag_len;
1645 skb->data_len += curr_frag_len;
bdb28a97 1646 skb->truesize += rx_frag_size;
2e588f84 1647 remaining -= curr_frag_len;
205859a2 1648 page_info->page = NULL;
6b7c5b94 1649 }
bd46cb6c 1650 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1651}
1652
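/* Illustrative sketch, not driver code: a packet of pkt_size bytes spans
 * ceil(pkt_size / rx_frag_size) RX fragments; skb_fill_rx_data() copies the
 * ETH_HLEN header into the linear area and hangs the rest off frags.
 * Standalone arithmetic check (the sizes are examples):
 */
#include <stdio.h>

int main(void)
{
	unsigned pkt_size = 9000, rx_frag_size = 2048;
	unsigned nfrags = (pkt_size + rx_frag_size - 1) / rx_frag_size;

	printf("fragments spanned: %u\n", nfrags);	/* prints 5 */
	return 0;
}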
5be93b9a 1653/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1654static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1655 struct be_rx_compl_info *rxcp)
6b7c5b94 1656{
10ef9ab4 1657 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1658 struct net_device *netdev = adapter->netdev;
6b7c5b94 1659 struct sk_buff *skb;
89420424 1660
bb349bb4 1661 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1662 if (unlikely(!skb)) {
ac124ff9 1663 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1664 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1665 return;
1666 }
1667
10ef9ab4 1668 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1669
6332c8d3 1670 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1671 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1672 else
1673 skb_checksum_none_assert(skb);
6b7c5b94 1674
6332c8d3 1675 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1676 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1677 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1678 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1679
1680 skb->encapsulation = rxcp->tunneled;
6384a4d0 1681 skb_mark_napi_id(skb, napi);
6b7c5b94 1682
343e43c0 1683 if (rxcp->vlanf)
86a9bad3 1684 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1685
1686 netif_receive_skb(skb);
6b7c5b94
SP
1687}
1688
5be93b9a 1689/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1690static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1691 struct napi_struct *napi,
1692 struct be_rx_compl_info *rxcp)
6b7c5b94 1693{
10ef9ab4 1694 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1695 struct be_rx_page_info *page_info;
5be93b9a 1696 struct sk_buff *skb = NULL;
2e588f84
SP
1697 u16 remaining, curr_frag_len;
1698 u16 i, j;
3968fa1e 1699
10ef9ab4 1700 skb = napi_get_frags(napi);
5be93b9a 1701 if (!skb) {
10ef9ab4 1702 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1703 return;
1704 }
1705
2e588f84
SP
1706 remaining = rxcp->pkt_size;
1707 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1708 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1709
1710 curr_frag_len = min(remaining, rx_frag_size);
1711
bd46cb6c
AK
1712 /* Coalesce all frags from the same physical page in one slot */
1713 if (i == 0 || page_info->page_offset == 0) {
1714 /* First frag or Fresh page */
1715 j++;
b061b39e 1716 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1717 skb_shinfo(skb)->frags[j].page_offset =
1718 page_info->page_offset;
9e903e08 1719 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1720 } else {
1721 put_page(page_info->page);
1722 }
9e903e08 1723 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1724 skb->truesize += rx_frag_size;
bd46cb6c 1725 remaining -= curr_frag_len;
6b7c5b94
SP
1726 memset(page_info, 0, sizeof(*page_info));
1727 }
bd46cb6c 1728 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1729
5be93b9a 1730 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1731 skb->len = rxcp->pkt_size;
1732 skb->data_len = rxcp->pkt_size;
5be93b9a 1733 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1734 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1735 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1736 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1737
1738 skb->encapsulation = rxcp->tunneled;
6384a4d0 1739 skb_mark_napi_id(skb, napi);
5be93b9a 1740
343e43c0 1741 if (rxcp->vlanf)
86a9bad3 1742 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1743
10ef9ab4 1744 napi_gro_frags(napi);
2e588f84
SP
1745}
1746
10ef9ab4
SP
1747static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1748 struct be_rx_compl_info *rxcp)
2e588f84
SP
1749{
1750 rxcp->pkt_size =
1751 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1752 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1753 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1754 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1755 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1756 rxcp->ip_csum =
1757 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1758 rxcp->l4_csum =
1759 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1760 rxcp->ipv6 =
1761 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
2e588f84
SP
1762 rxcp->num_rcvd =
1763 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1764 rxcp->pkt_type =
1765 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1766 rxcp->rss_hash =
c297977e 1767 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184 1768 if (rxcp->vlanf) {
f93f160b 1769 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
3c709f8f 1770 compl);
748b539a
SP
1771 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1772 vlan_tag, compl);
15d72184 1773 }
12004ae9 1774 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
c9c47142
SP
1775 rxcp->tunneled =
1776 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
2e588f84
SP
1777}
1778
10ef9ab4
SP
1779static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1780 struct be_rx_compl_info *rxcp)
2e588f84
SP
1781{
1782 rxcp->pkt_size =
1783 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1784 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1785 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1786 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1787 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1788 rxcp->ip_csum =
1789 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1790 rxcp->l4_csum =
1791 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1792 rxcp->ipv6 =
1793 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
2e588f84
SP
1794 rxcp->num_rcvd =
1795 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1796 rxcp->pkt_type =
1797 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1798 rxcp->rss_hash =
c297977e 1799 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184 1800 if (rxcp->vlanf) {
f93f160b 1801 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
3c709f8f 1802 compl);
748b539a
SP
1803 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1804 vlan_tag, compl);
15d72184 1805 }
12004ae9 1806 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
e38b1706
SK
1807 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1808 ip_frag, compl);
2e588f84
SP
1809}
1810
1811static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1812{
1813 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1814 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1815 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1816
2e588f84
SP
1817 /* For checking the valid bit it is OK to use either definition as the
1818 * valid bit is at the same position in both v0 and v1 Rx compl */
1819 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1820 return NULL;
6b7c5b94 1821
2e588f84
SP
1822 rmb();
1823 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1824
2e588f84 1825 if (adapter->be3_native)
10ef9ab4 1826 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1827 else
10ef9ab4 1828 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1829
e38b1706
SK
1830 if (rxcp->ip_frag)
1831 rxcp->l4_csum = 0;
1832
15d72184 1833 if (rxcp->vlanf) {
f93f160b
VV
1834 /* In QNQ modes, if qnq bit is not set, then the packet was
1835 * tagged only with the transparent outer vlan-tag and must
1836 * not be treated as a vlan packet by host
1837 */
1838 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1839 rxcp->vlanf = 0;
6b7c5b94 1840
15d72184 1841 if (!lancer_chip(adapter))
3c709f8f 1842 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1843
939cf306 1844 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1845 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1846 rxcp->vlanf = 0;
1847 }
2e588f84
SP
1848
1849 /* As the compl has been parsed, reset it; we won't touch it again */
1850 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1851
3abcdeda 1852 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1853 return rxcp;
1854}
1855
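/* Illustrative sketch, not driver code: the valid-bit check followed by
 * rmb() above is the classic consumer side of a DMA'd completion ring: read
 * the valid bit first, then order all later loads after it. A user-space
 * analogue using C11 acquire semantics (not how the kernel spells it):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct compl_entry {
	_Atomic unsigned valid;
	unsigned payload;
};

static bool compl_get(struct compl_entry *c, unsigned *out)
{
	if (!atomic_load_explicit(&c->valid, memory_order_acquire))
		return false;		/* producer has not published anything */
	*out = c->payload;		/* ordered after the valid-bit load */
	atomic_store_explicit(&c->valid, 0, memory_order_relaxed); /* reset */
	return true;
}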
1829b086 1856static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1857{
6b7c5b94 1858 u32 order = get_order(size);
1829b086 1859
6b7c5b94 1860 if (order > 0)
1829b086
ED
1861 gfp |= __GFP_COMP;
1862 return alloc_pages(gfp, order);
6b7c5b94
SP
1863}
1864
1865/*
1866 * Allocate a page, split it into fragments of size rx_frag_size and post as
1867 * receive buffers to BE
1868 */
1829b086 1869static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1870{
3abcdeda 1871 struct be_adapter *adapter = rxo->adapter;
26d92f92 1872 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1873 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1874 struct page *pagep = NULL;
ba42fad0 1875 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1876 struct be_eth_rx_d *rxd;
1877 u64 page_dmaaddr = 0, frag_dmaaddr;
1878 u32 posted, page_offset = 0;
1879
3abcdeda 1880 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1881 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1882 if (!pagep) {
1829b086 1883 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1884 if (unlikely(!pagep)) {
ac124ff9 1885 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1886 break;
1887 }
ba42fad0
IV
1888 page_dmaaddr = dma_map_page(dev, pagep, 0,
1889 adapter->big_page_size,
2b7bcebf 1890 DMA_FROM_DEVICE);
ba42fad0
IV
1891 if (dma_mapping_error(dev, page_dmaaddr)) {
1892 put_page(pagep);
1893 pagep = NULL;
1894 rx_stats(rxo)->rx_post_fail++;
1895 break;
1896 }
e50287be 1897 page_offset = 0;
6b7c5b94
SP
1898 } else {
1899 get_page(pagep);
e50287be 1900 page_offset += rx_frag_size;
6b7c5b94 1901 }
e50287be 1902 page_info->page_offset = page_offset;
6b7c5b94 1903 page_info->page = pagep;
6b7c5b94
SP
1904
1905 rxd = queue_head_node(rxq);
e50287be 1906 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1907 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1908 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1909
1910 /* Any space left in the current big page for another frag? */
1911 if ((page_offset + rx_frag_size + rx_frag_size) >
1912 adapter->big_page_size) {
1913 pagep = NULL;
e50287be
SP
1914 page_info->last_frag = true;
1915 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1916 } else {
1917 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1918 }
26d92f92
SP
1919
1920 prev_page_info = page_info;
1921 queue_head_inc(rxq);
10ef9ab4 1922 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1923 }
e50287be
SP
1924
1925 /* Mark the last frag of a page when we break out of the above loop
1926 * with no more slots available in the RXQ
1927 */
1928 if (pagep) {
1929 prev_page_info->last_frag = true;
1930 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1931 }
6b7c5b94
SP
1932
1933 if (posted) {
6b7c5b94 1934 atomic_add(posted, &rxq->used);
6384a4d0
SP
1935 if (rxo->rx_post_starved)
1936 rxo->rx_post_starved = false;
8788fdc2 1937 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1938 } else if (atomic_read(&rxq->used) == 0) {
1939 /* Let be_worker replenish when memory is available */
3abcdeda 1940 rxo->rx_post_starved = true;
6b7c5b94 1941 }
6b7c5b94
SP
1942}
1943
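/* Illustrative sketch, not driver code: be_post_rx_frags() carves each
 * allocated page into rx_frag_size slices and stops using a page once less
 * than one full slice would remain (the "any space left?" test above).
 * Standalone check of that slicing math (the 4 KiB page size is an example):
 */
#include <stdio.h>

int main(void)
{
	unsigned big_page_size = 4096, rx_frag_size = 2048;
	unsigned off, frags = 0;

	for (off = 0; off + rx_frag_size <= big_page_size; off += rx_frag_size)
		frags++;	/* each iteration posts one RX descriptor */

	printf("fragments per page: %u\n", frags);	/* prints 2 */
	return 0;
}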
5fb379ee 1944static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1945{
6b7c5b94
SP
1946 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1947
1948 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1949 return NULL;
1950
f3eb62d2 1951 rmb();
6b7c5b94
SP
1952 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1953
1954 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1955
1956 queue_tail_inc(tx_cq);
1957 return txcp;
1958}
1959
3c8def97 1960static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1961 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1962{
3c8def97 1963 struct be_queue_info *txq = &txo->q;
a73b796e 1964 struct be_eth_wrb *wrb;
3c8def97 1965 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1966 struct sk_buff *sent_skb;
ec43b1a6
SP
1967 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1968 bool unmap_skb_hdr = true;
6b7c5b94 1969
ec43b1a6 1970 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1971 BUG_ON(!sent_skb);
ec43b1a6
SP
1972 sent_skbs[txq->tail] = NULL;
1973
1974 /* skip header wrb */
a73b796e 1975 queue_tail_inc(txq);
6b7c5b94 1976
ec43b1a6 1977 do {
6b7c5b94 1978 cur_index = txq->tail;
a73b796e 1979 wrb = queue_tail_node(txq);
2b7bcebf
IV
1980 unmap_tx_frag(&adapter->pdev->dev, wrb,
1981 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1982 unmap_skb_hdr = false;
1983
6b7c5b94
SP
1984 num_wrbs++;
1985 queue_tail_inc(txq);
ec43b1a6 1986 } while (cur_index != last_index);
6b7c5b94 1987
d8ec2c02 1988 dev_kfree_skb_any(sent_skb);
4d586b82 1989 return num_wrbs;
6b7c5b94
SP
1990}
1991
10ef9ab4
SP
1992/* Return the number of events in the event queue */
1993static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1994{
10ef9ab4
SP
1995 struct be_eq_entry *eqe;
1996 int num = 0;
859b1e4e 1997
10ef9ab4
SP
1998 do {
1999 eqe = queue_tail_node(&eqo->q);
2000 if (eqe->evt == 0)
2001 break;
859b1e4e 2002
10ef9ab4
SP
2003 rmb();
2004 eqe->evt = 0;
2005 num++;
2006 queue_tail_inc(&eqo->q);
2007 } while (true);
2008
2009 return num;
859b1e4e
SP
2010}
2011
10ef9ab4
SP
2012 /* Leaves the EQ in disarmed state */
2013static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2014{
10ef9ab4 2015 int num = events_get(eqo);
859b1e4e 2016
10ef9ab4 2017 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2018}
2019
10ef9ab4 2020static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2021{
2022 struct be_rx_page_info *page_info;
3abcdeda
SP
2023 struct be_queue_info *rxq = &rxo->q;
2024 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2025 struct be_rx_compl_info *rxcp;
d23e946c
SP
2026 struct be_adapter *adapter = rxo->adapter;
2027 int flush_wait = 0;
6b7c5b94 2028
d23e946c
SP
2029 /* Consume pending rx completions.
2030 * Wait for the flush completion (identified by zero num_rcvd)
2031 * to arrive. Notify CQ even when there are no more CQ entries
2032 * for HW to flush partially coalesced CQ entries.
2033 * In Lancer, there is no need to wait for flush compl.
2034 */
2035 for (;;) {
2036 rxcp = be_rx_compl_get(rxo);
ddf1169f 2037 if (!rxcp) {
d23e946c
SP
2038 if (lancer_chip(adapter))
2039 break;
2040
2041 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2042 dev_warn(&adapter->pdev->dev,
2043 "did not receive flush compl\n");
2044 break;
2045 }
2046 be_cq_notify(adapter, rx_cq->id, true, 0);
2047 mdelay(1);
2048 } else {
2049 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2050 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2051 if (rxcp->num_rcvd == 0)
2052 break;
2053 }
6b7c5b94
SP
2054 }
2055
d23e946c
SP
2056 /* After cleanup, leave the CQ in unarmed state */
2057 be_cq_notify(adapter, rx_cq->id, false, 0);
2058
2059 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2060 while (atomic_read(&rxq->used) > 0) {
2061 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2062 put_page(page_info->page);
2063 memset(page_info, 0, sizeof(*page_info));
2064 }
2065 BUG_ON(atomic_read(&rxq->used));
482c9e79 2066 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2067}
2068
0ae57bb3 2069static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2070{
0ae57bb3
SP
2071 struct be_tx_obj *txo;
2072 struct be_queue_info *txq;
a8e9179a 2073 struct be_eth_tx_compl *txcp;
4d586b82 2074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
0ae57bb3 2077 int i, pending_txqs;
a8e9179a 2078
1a3d0717 2079 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2080 do {
0ae57bb3
SP
2081 pending_txqs = adapter->num_tx_qs;
2082
2083 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2084 cmpl = 0;
2085 num_wrbs = 0;
0ae57bb3
SP
2086 txq = &txo->q;
2087 while ((txcp = be_tx_compl_get(&txo->cq))) {
2088 end_idx =
2089 AMAP_GET_BITS(struct amap_eth_tx_compl,
2090 wrb_index, txcp);
2091 num_wrbs += be_tx_compl_process(adapter, txo,
2092 end_idx);
2093 cmpl++;
2094 }
2095 if (cmpl) {
2096 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2097 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2098 timeo = 0;
0ae57bb3
SP
2099 }
2100 if (atomic_read(&txq->used) == 0)
2101 pending_txqs--;
a8e9179a
SP
2102 }
2103
1a3d0717 2104 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2105 break;
2106
2107 mdelay(1);
2108 } while (true);
2109
0ae57bb3
SP
2110 for_all_tx_queues(adapter, txo, i) {
2111 txq = &txo->q;
2112 if (atomic_read(&txq->used))
2113 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2114 atomic_read(&txq->used));
2115
2116 /* free posted tx for which compls will never arrive */
2117 while (atomic_read(&txq->used)) {
2118 sent_skb = txo->sent_skb_list[txq->tail];
2119 end_idx = txq->tail;
2120 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2121 &dummy_wrb);
2122 index_adv(&end_idx, num_wrbs - 1, txq->len);
2123 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2124 atomic_sub(num_wrbs, &txq->used);
2125 }
b03388d6 2126 }
6b7c5b94
SP
2127}
2128
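/* Illustrative sketch, not driver code: be_tx_compl_clean() keeps reaping
 * completions and only gives up after ~10 consecutive quiet 1 ms waits; any
 * progress resets the window. Skeleton of that idiom (the callbacks are
 * stand-ins, not driver functions):
 */
static void drain_with_quiet_timeout(int (*reap)(void), void (*wait_1ms)(void))
{
	int timeo = 0;

	for (;;) {
		if (reap() > 0)
			timeo = 0;	/* progress: restart the quiet window */
		else if (++timeo > 10)
			break;		/* ~10 ms of silence: give up */
		wait_1ms();
	}
}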
10ef9ab4
SP
2129static void be_evt_queues_destroy(struct be_adapter *adapter)
2130{
2131 struct be_eq_obj *eqo;
2132 int i;
2133
2134 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2135 if (eqo->q.created) {
2136 be_eq_clean(eqo);
10ef9ab4 2137 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2138 napi_hash_del(&eqo->napi);
68d7bdcb 2139 netif_napi_del(&eqo->napi);
19d59aa7 2140 }
10ef9ab4
SP
2141 be_queue_free(adapter, &eqo->q);
2142 }
2143}
2144
2145static int be_evt_queues_create(struct be_adapter *adapter)
2146{
2147 struct be_queue_info *eq;
2148 struct be_eq_obj *eqo;
2632bafd 2149 struct be_aic_obj *aic;
10ef9ab4
SP
2150 int i, rc;
2151
92bf14ab
SP
2152 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2153 adapter->cfg_num_qs);
10ef9ab4
SP
2154
2155 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2156 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2157 BE_NAPI_WEIGHT);
6384a4d0 2158 napi_hash_add(&eqo->napi);
2632bafd 2159 aic = &adapter->aic_obj[i];
10ef9ab4
SP
2160 eqo->adapter = adapter;
2161 eqo->tx_budget = BE_TX_BUDGET;
2162 eqo->idx = i;
2632bafd
SP
2163 aic->max_eqd = BE_MAX_EQD;
2164 aic->enable = true;
10ef9ab4
SP
2165
2166 eq = &eqo->q;
2167 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2168 sizeof(struct be_eq_entry));
10ef9ab4
SP
2169 if (rc)
2170 return rc;
2171
f2f781a7 2172 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2173 if (rc)
2174 return rc;
2175 }
1cfafab9 2176 return 0;
10ef9ab4
SP
2177}
2178
5fb379ee
SP
2179static void be_mcc_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q;
5fb379ee 2182
8788fdc2 2183 q = &adapter->mcc_obj.q;
5fb379ee 2184 if (q->created)
8788fdc2 2185 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2186 be_queue_free(adapter, q);
2187
8788fdc2 2188 q = &adapter->mcc_obj.cq;
5fb379ee 2189 if (q->created)
8788fdc2 2190 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2191 be_queue_free(adapter, q);
2192}
2193
2194/* Must be called only after TX qs are created as MCC shares TX EQ */
2195static int be_mcc_queues_create(struct be_adapter *adapter)
2196{
2197 struct be_queue_info *q, *cq;
5fb379ee 2198
8788fdc2 2199 cq = &adapter->mcc_obj.cq;
5fb379ee 2200 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2201 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2202 goto err;
2203
10ef9ab4
SP
2204 /* Use the default EQ for MCC completions */
2205 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2206 goto mcc_cq_free;
2207
8788fdc2 2208 q = &adapter->mcc_obj.q;
5fb379ee
SP
2209 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2210 goto mcc_cq_destroy;
2211
8788fdc2 2212 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2213 goto mcc_q_free;
2214
2215 return 0;
2216
2217mcc_q_free:
2218 be_queue_free(adapter, q);
2219mcc_cq_destroy:
8788fdc2 2220 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2221mcc_cq_free:
2222 be_queue_free(adapter, cq);
2223err:
2224 return -1;
2225}
2226
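/* Illustrative sketch, not driver code: be_mcc_queues_create() uses the
 * kernel's goto-based unwind: each failure jumps to a label that frees only
 * what was already built, in reverse order of construction. Minimal
 * standalone shape of the pattern (the resources are stand-ins):
 */
#include <stdlib.h>

static int create_two(void **a, void **b)
{
	*a = malloc(16);
	if (!*a)
		goto err;

	*b = malloc(16);
	if (!*b)
		goto free_a;

	return 0;

free_a:
	free(*a);	/* undo step 1 only; step 2 never completed */
err:
	return -1;
}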
6b7c5b94
SP
2227static void be_tx_queues_destroy(struct be_adapter *adapter)
2228{
2229 struct be_queue_info *q;
3c8def97
SP
2230 struct be_tx_obj *txo;
2231 u8 i;
6b7c5b94 2232
3c8def97
SP
2233 for_all_tx_queues(adapter, txo, i) {
2234 q = &txo->q;
2235 if (q->created)
2236 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2237 be_queue_free(adapter, q);
6b7c5b94 2238
3c8def97
SP
2239 q = &txo->cq;
2240 if (q->created)
2241 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2242 be_queue_free(adapter, q);
2243 }
6b7c5b94
SP
2244}
2245
7707133c 2246static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2247{
10ef9ab4 2248 struct be_queue_info *cq, *eq;
3c8def97 2249 struct be_tx_obj *txo;
92bf14ab 2250 int status, i;
6b7c5b94 2251
92bf14ab 2252 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2253
10ef9ab4
SP
2254 for_all_tx_queues(adapter, txo, i) {
2255 cq = &txo->cq;
2256 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2257 sizeof(struct be_eth_tx_compl));
2258 if (status)
2259 return status;
3c8def97 2260
827da44c
JS
2261 u64_stats_init(&txo->stats.sync);
2262 u64_stats_init(&txo->stats.sync_compl);
2263
10ef9ab4
SP
2264 /* If num_evt_qs is less than num_tx_qs, then more than
2265 * one txq shares an eq
2266 */
2267 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2268 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2269 if (status)
2270 return status;
6b7c5b94 2271
10ef9ab4
SP
2272 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2273 sizeof(struct be_eth_wrb));
2274 if (status)
2275 return status;
6b7c5b94 2276
94d73aaa 2277 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2278 if (status)
2279 return status;
3c8def97 2280 }
6b7c5b94 2281
d379142b
SP
2282 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2283 adapter->num_tx_qs);
10ef9ab4 2284 return 0;
6b7c5b94
SP
2285}
2286
10ef9ab4 2287static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2288{
2289 struct be_queue_info *q;
3abcdeda
SP
2290 struct be_rx_obj *rxo;
2291 int i;
2292
2293 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2294 q = &rxo->cq;
2295 if (q->created)
2296 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2297 be_queue_free(adapter, q);
ac6a0c4a
SP
2298 }
2299}
2300
10ef9ab4 2301static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2302{
10ef9ab4 2303 struct be_queue_info *eq, *cq;
3abcdeda
SP
2304 struct be_rx_obj *rxo;
2305 int rc, i;
6b7c5b94 2306
92bf14ab
SP
2307 /* We can create as many RSS rings as there are EQs. */
2308 adapter->num_rx_qs = adapter->num_evt_qs;
2309
2310 /* We'll use RSS only if at least 2 RSS rings are supported.
2311 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2312 */
92bf14ab
SP
2313 if (adapter->num_rx_qs > 1)
2314 adapter->num_rx_qs++;
2315
6b7c5b94 2316 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2317 for_all_rx_queues(adapter, rxo, i) {
2318 rxo->adapter = adapter;
3abcdeda
SP
2319 cq = &rxo->cq;
2320 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2321 sizeof(struct be_eth_rx_compl));
3abcdeda 2322 if (rc)
10ef9ab4 2323 return rc;
3abcdeda 2324
827da44c 2325 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2326 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2327 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2328 if (rc)
10ef9ab4 2329 return rc;
3abcdeda 2330 }
6b7c5b94 2331
d379142b
SP
2332 dev_info(&adapter->pdev->dev,
2333 "created %d RSS queue(s) and 1 default RX queue\n",
2334 adapter->num_rx_qs - 1);
10ef9ab4 2335 return 0;
b628bde2
SP
2336}
2337
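/* Illustrative sketch, not driver code: the queue count above is one RSS
 * ring per EQ, plus one default (non-RSS) RXQ whenever more than one ring is
 * possible. Standalone restatement:
 */
static int num_rx_qs(int num_evt_qs)
{
	int n = num_evt_qs;	/* one RSS ring per event queue */

	if (n > 1)
		n++;		/* plus the default RXQ for non-IP traffic */
	return n;
}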
6b7c5b94
SP
2338static irqreturn_t be_intx(int irq, void *dev)
2339{
e49cc34f
SP
2340 struct be_eq_obj *eqo = dev;
2341 struct be_adapter *adapter = eqo->adapter;
2342 int num_evts = 0;
6b7c5b94 2343
d0b9cec3
SP
2344 /* IRQ is not expected when NAPI is scheduled as the EQ
2345 * will not be armed.
2346 * But, this can happen on Lancer INTx where it takes
2347 * a while to de-assert INTx or in BE2 where occasionally
2348 * an interrupt may be raised even when EQ is unarmed.
2349 * If NAPI is already scheduled, then counting & notifying
2350 * events will orphan them.
e49cc34f 2351 */
d0b9cec3 2352 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2353 num_evts = events_get(eqo);
d0b9cec3
SP
2354 __napi_schedule(&eqo->napi);
2355 if (num_evts)
2356 eqo->spurious_intr = 0;
2357 }
2358 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2359
d0b9cec3
SP
2360 /* Return IRQ_HANDLED only for the first spurious intr
2361 * after a valid intr to stop the kernel from branding
2362 * this irq as a bad one!
e49cc34f 2363 */
d0b9cec3
SP
2364 if (num_evts || eqo->spurious_intr++ == 0)
2365 return IRQ_HANDLED;
2366 else
2367 return IRQ_NONE;
6b7c5b94
SP
2368}
2369
10ef9ab4 2370static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2371{
10ef9ab4 2372 struct be_eq_obj *eqo = dev;
6b7c5b94 2373
0b545a62
SP
2374 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2375 napi_schedule(&eqo->napi);
6b7c5b94
SP
2376 return IRQ_HANDLED;
2377}
2378
2e588f84 2379static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2380{
e38b1706 2381 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2382}
2383
10ef9ab4 2384static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2385 int budget, int polling)
6b7c5b94 2386{
3abcdeda
SP
2387 struct be_adapter *adapter = rxo->adapter;
2388 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2389 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2390 u32 work_done;
2391
2392 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2393 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2394 if (!rxcp)
2395 break;
2396
12004ae9
SP
2397 /* Is it a flush compl that has no data */
2398 if (unlikely(rxcp->num_rcvd == 0))
2399 goto loop_continue;
2400
2401 /* Discard compl with partial DMA Lancer B0 */
2402 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2403 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2404 goto loop_continue;
2405 }
2406
2407 /* On BE drop pkts that arrive due to imperfect filtering in
2408 * promiscuous mode on some SKUs
2409 */
2410 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2411 !lancer_chip(adapter))) {
10ef9ab4 2412 be_rx_compl_discard(rxo, rxcp);
12004ae9 2413 goto loop_continue;
64642811 2414 }
009dd872 2415
6384a4d0
SP
2416 /* Don't do gro when we're busy_polling */
2417 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2418 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2419 else
6384a4d0
SP
2420 be_rx_compl_process(rxo, napi, rxcp);
2421
12004ae9 2422loop_continue:
2e588f84 2423 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2424 }
2425
10ef9ab4
SP
2426 if (work_done) {
2427 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2428
6384a4d0
SP
2429 /* When an rx-obj gets into post_starved state, just
2430 * let be_worker do the posting.
2431 */
2432 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2433 !rxo->rx_post_starved)
10ef9ab4 2434 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2435 }
10ef9ab4 2436
6b7c5b94
SP
2437 return work_done;
2438}
2439
10ef9ab4
SP
2440static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2441 int budget, int idx)
6b7c5b94 2442{
6b7c5b94 2443 struct be_eth_tx_compl *txcp;
10ef9ab4 2444 int num_wrbs = 0, work_done;
3c8def97 2445
10ef9ab4
SP
2446 for (work_done = 0; work_done < budget; work_done++) {
2447 txcp = be_tx_compl_get(&txo->cq);
2448 if (!txcp)
2449 break;
2450 num_wrbs += be_tx_compl_process(adapter, txo,
748b539a
SP
2451 AMAP_GET_BITS(struct
2452 amap_eth_tx_compl,
2453 wrb_index, txcp));
10ef9ab4 2454 }
6b7c5b94 2455
10ef9ab4
SP
2456 if (work_done) {
2457 be_cq_notify(adapter, txo->cq.id, true, work_done);
2458 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2459
10ef9ab4
SP
2460 /* As Tx wrbs have been freed up, wake up netdev queue
2461 * if it was stopped due to lack of tx wrbs. */
2462 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2463 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2464 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2465 }
10ef9ab4
SP
2466
2467 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2468 tx_stats(txo)->tx_compl += work_done;
2469 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2470 }
10ef9ab4
SP
2471 return (work_done < budget); /* Done */
2472}
6b7c5b94 2473
68d7bdcb 2474int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2475{
2476 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2477 struct be_adapter *adapter = eqo->adapter;
0b545a62 2478 int max_work = 0, work, i, num_evts;
6384a4d0 2479 struct be_rx_obj *rxo;
10ef9ab4 2480 bool tx_done;
f31e50a8 2481
0b545a62
SP
2482 num_evts = events_get(eqo);
2483
10ef9ab4
SP
2484 /* Process all TXQs serviced by this EQ */
2485 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2486 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2487 eqo->tx_budget, i);
2488 if (!tx_done)
2489 max_work = budget;
f31e50a8
SP
2490 }
2491
6384a4d0
SP
2492 if (be_lock_napi(eqo)) {
2493 /* This loop will iterate twice for EQ0 in which
2494 * completions of the last RXQ (default one) are also processed.
2495 * For other EQs the loop iterates only once.
2496 */
2497 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2498 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2499 max_work = max(work, max_work);
2500 }
2501 be_unlock_napi(eqo);
2502 } else {
2503 max_work = budget;
10ef9ab4 2504 }
6b7c5b94 2505
10ef9ab4
SP
2506 if (is_mcc_eqo(eqo))
2507 be_process_mcc(adapter);
93c86700 2508
10ef9ab4
SP
2509 if (max_work < budget) {
2510 napi_complete(napi);
0b545a62 2511 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2512 } else {
2513 /* As we'll continue in polling mode, count and clear events */
0b545a62 2514 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2515 }
10ef9ab4 2516 return max_work;
6b7c5b94
SP
2517}
2518
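/* Illustrative sketch, not driver code: be_poll() walks TXQs with the stride
 * pattern above (i = eqo->idx; i += num_evt_qs), i.e. TXQs are striped
 * round-robin across EQs. Standalone illustration with example counts:
 */
#include <stdio.h>

int main(void)
{
	int num_tx_qs = 8, num_evt_qs = 4, eq, i;

	for (eq = 0; eq < num_evt_qs; eq++) {
		printf("EQ%d services:", eq);
		for (i = eq; i < num_tx_qs; i += num_evt_qs)
			printf(" TXQ%d", i);
		printf("\n");	/* e.g. "EQ1 services: TXQ1 TXQ5" */
	}
	return 0;
}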
6384a4d0
SP
2519#ifdef CONFIG_NET_RX_BUSY_POLL
2520static int be_busy_poll(struct napi_struct *napi)
2521{
2522 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2523 struct be_adapter *adapter = eqo->adapter;
2524 struct be_rx_obj *rxo;
2525 int i, work = 0;
2526
2527 if (!be_lock_busy_poll(eqo))
2528 return LL_FLUSH_BUSY;
2529
2530 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2531 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2532 if (work)
2533 break;
2534 }
2535
2536 be_unlock_busy_poll(eqo);
2537 return work;
2538}
2539#endif
2540
f67ef7ba 2541void be_detect_error(struct be_adapter *adapter)
7c185276 2542{
e1cfb67a
PR
2543 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2544 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2545 u32 i;
eb0eecc1
SK
2546 bool error_detected = false;
2547 struct device *dev = &adapter->pdev->dev;
2548 struct net_device *netdev = adapter->netdev;
7c185276 2549
d23e946c 2550 if (be_hw_error(adapter))
72f02485
SP
2551 return;
2552
e1cfb67a
PR
2553 if (lancer_chip(adapter)) {
2554 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2555 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2556 sliport_err1 = ioread32(adapter->db +
748b539a 2557 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2558 sliport_err2 = ioread32(adapter->db +
748b539a 2559 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2560 adapter->hw_error = true;
2561 /* Do not log error messages if its a FW reset */
2562 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2563 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2564 dev_info(dev, "Firmware update in progress\n");
2565 } else {
2566 error_detected = true;
2567 dev_err(dev, "Error detected in the card\n");
2568 dev_err(dev, "ERR: sliport status 0x%x\n",
2569 sliport_status);
2570 dev_err(dev, "ERR: sliport error1 0x%x\n",
2571 sliport_err1);
2572 dev_err(dev, "ERR: sliport error2 0x%x\n",
2573 sliport_err2);
2574 }
e1cfb67a
PR
2575 }
2576 } else {
2577 pci_read_config_dword(adapter->pdev,
748b539a 2578 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2579 pci_read_config_dword(adapter->pdev,
748b539a 2580 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2581 pci_read_config_dword(adapter->pdev,
748b539a 2582 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2583 pci_read_config_dword(adapter->pdev,
748b539a 2584 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2585
f67ef7ba
PR
2586 ue_lo = (ue_lo & ~ue_lo_mask);
2587 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2588
eb0eecc1
SK
2589 /* On certain platforms BE hardware can indicate spurious UEs.
2590 * Allow HW to stop working completely in case of a real UE.
2591 * Hence hw_error is not set on UE detection.
2592 */
f67ef7ba 2593
eb0eecc1
SK
2594 if (ue_lo || ue_hi) {
2595 error_detected = true;
2596 dev_err(dev,
2597 "Unrecoverable Error detected in the adapter");
2598 dev_err(dev, "Please reboot server to recover");
2599 if (skyhawk_chip(adapter))
2600 adapter->hw_error = true;
2601 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2602 if (ue_lo & 1)
2603 dev_err(dev, "UE: %s bit set\n",
2604 ue_status_low_desc[i]);
2605 }
2606 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2607 if (ue_hi & 1)
2608 dev_err(dev, "UE: %s bit set\n",
2609 ue_status_hi_desc[i]);
2610 }
7c185276
AK
2611 }
2612 }
eb0eecc1
SK
2613 if (error_detected)
2614 netif_carrier_off(netdev);
7c185276
AK
2615}
2616
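/* Illustrative sketch, not driver code: the UE decode loops above shift the
 * status word right one bit at a time and print the matching description.
 * Standalone version with a shortened, made-up table:
 */
#include <stdio.h>
#include <stdint.h>

static const char * const ue_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

static void decode_ue(uint32_t ue)
{
	unsigned i;

	for (i = 0; ue && i < 4; ue >>= 1, i++)
		if (ue & 1)
			printf("UE: %s bit set\n", ue_desc[i]);
}

int main(void)
{
	decode_ue(0x5);		/* bits 0 and 2: prints CEV and DBUF */
	return 0;
}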
8d56ff11
SP
2617static void be_msix_disable(struct be_adapter *adapter)
2618{
ac6a0c4a 2619 if (msix_enabled(adapter)) {
8d56ff11 2620 pci_disable_msix(adapter->pdev);
ac6a0c4a 2621 adapter->num_msix_vec = 0;
68d7bdcb 2622 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2623 }
2624}
2625
c2bba3df 2626static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2627{
7dc4c064 2628 int i, num_vec;
d379142b 2629 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2630
92bf14ab
SP
2631 /* If RoCE is supported, program the max number of NIC vectors that
2632 * may be configured via set-channels, along with vectors needed for
2633 * RoCE. Else, just program the number we'll use initially.
2634 */
2635 if (be_roce_supported(adapter))
2636 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2637 2 * num_online_cpus());
2638 else
2639 num_vec = adapter->cfg_num_qs;
3abcdeda 2640
ac6a0c4a 2641 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2642 adapter->msix_entries[i].entry = i;
2643
7dc4c064
AG
2644 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2645 MIN_MSIX_VECTORS, num_vec);
2646 if (num_vec < 0)
2647 goto fail;
92bf14ab 2648
92bf14ab
SP
2649 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2650 adapter->num_msix_roce_vec = num_vec / 2;
2651 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2652 adapter->num_msix_roce_vec);
2653 }
2654
2655 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2656
2657 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2658 adapter->num_msix_vec);
c2bba3df 2659 return 0;
7dc4c064
AG
2660
2661fail:
2662 dev_warn(dev, "MSIx enable failed\n");
2663
2664 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2665 if (!be_physfn(adapter))
2666 return num_vec;
2667 return 0;
6b7c5b94
SP
2668}
2669
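/* Illustrative sketch, not driver code: with RoCE supported, the driver
 * requests up to min(2 * max_eqs, 2 * num_online_cpus()) vectors and, if it
 * got more than the minimum, hands half to RoCE. Standalone restatement
 * (the counts are examples):
 */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_eqs = 16, cpus = 8;
	int num_vec = min_int(2 * max_eqs, 2 * cpus);	/* 16 */
	int roce_vec = num_vec / 2;			/* 8 go to RoCE */

	printf("NIC vectors: %d\n", num_vec - roce_vec);	/* prints 8 */
	return 0;
}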
fe6d2a38 2670static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2671 struct be_eq_obj *eqo)
b628bde2 2672{
f2f781a7 2673 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2674}
6b7c5b94 2675
b628bde2
SP
2676static int be_msix_register(struct be_adapter *adapter)
2677{
10ef9ab4
SP
2678 struct net_device *netdev = adapter->netdev;
2679 struct be_eq_obj *eqo;
2680 int status, i, vec;
6b7c5b94 2681
10ef9ab4
SP
2682 for_all_evt_queues(adapter, eqo, i) {
2683 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2684 vec = be_msix_vec_get(adapter, eqo);
2685 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2686 if (status)
2687 goto err_msix;
2688 }
b628bde2 2689
6b7c5b94 2690 return 0;
3abcdeda 2691err_msix:
10ef9ab4
SP
2692 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2693 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2694 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2695 status);
ac6a0c4a 2696 be_msix_disable(adapter);
6b7c5b94
SP
2697 return status;
2698}
2699
2700static int be_irq_register(struct be_adapter *adapter)
2701{
2702 struct net_device *netdev = adapter->netdev;
2703 int status;
2704
ac6a0c4a 2705 if (msix_enabled(adapter)) {
6b7c5b94
SP
2706 status = be_msix_register(adapter);
2707 if (status == 0)
2708 goto done;
ba343c77
SB
2709 /* INTx is not supported for VF */
2710 if (!be_physfn(adapter))
2711 return status;
6b7c5b94
SP
2712 }
2713
e49cc34f 2714 /* INTx: only the first EQ is used */
6b7c5b94
SP
2715 netdev->irq = adapter->pdev->irq;
2716 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2717 &adapter->eq_obj[0]);
6b7c5b94
SP
2718 if (status) {
2719 dev_err(&adapter->pdev->dev,
2720 "INTx request IRQ failed - err %d\n", status);
2721 return status;
2722 }
2723done:
2724 adapter->isr_registered = true;
2725 return 0;
2726}
2727
2728static void be_irq_unregister(struct be_adapter *adapter)
2729{
2730 struct net_device *netdev = adapter->netdev;
10ef9ab4 2731 struct be_eq_obj *eqo;
3abcdeda 2732 int i;
6b7c5b94
SP
2733
2734 if (!adapter->isr_registered)
2735 return;
2736
2737 /* INTx */
ac6a0c4a 2738 if (!msix_enabled(adapter)) {
e49cc34f 2739 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2740 goto done;
2741 }
2742
2743 /* MSIx */
10ef9ab4
SP
2744 for_all_evt_queues(adapter, eqo, i)
2745 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2746
6b7c5b94
SP
2747done:
2748 adapter->isr_registered = false;
6b7c5b94
SP
2749}
2750
10ef9ab4 2751static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2752{
2753 struct be_queue_info *q;
2754 struct be_rx_obj *rxo;
2755 int i;
2756
2757 for_all_rx_queues(adapter, rxo, i) {
2758 q = &rxo->q;
2759 if (q->created) {
2760 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2761 be_rx_cq_clean(rxo);
482c9e79 2762 }
10ef9ab4 2763 be_queue_free(adapter, q);
482c9e79
SP
2764 }
2765}
2766
889cd4b2
SP
2767static int be_close(struct net_device *netdev)
2768{
2769 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2770 struct be_eq_obj *eqo;
2771 int i;
889cd4b2 2772
e1ad8e33
KA
2773 /* This protection is needed as be_close() may be called even when the
2774 * adapter is in cleared state (after eeh perm failure)
2775 */
2776 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2777 return 0;
2778
045508a8
PP
2779 be_roce_dev_close(adapter);
2780
dff345c5
IV
2781 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2782 for_all_evt_queues(adapter, eqo, i) {
04d3d624 2783 napi_disable(&eqo->napi);
6384a4d0
SP
2784 be_disable_busy_poll(eqo);
2785 }
71237b6f 2786 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 2787 }
a323d9bf
SP
2788
2789 be_async_mcc_disable(adapter);
2790
2791 /* Wait for all pending tx completions to arrive so that
2792 * all tx skbs are freed.
2793 */
fba87559 2794 netif_tx_disable(netdev);
6e1f9975 2795 be_tx_compl_clean(adapter);
a323d9bf
SP
2796
2797 be_rx_qs_destroy(adapter);
2798
d11a347d
AK
2799 for (i = 1; i < (adapter->uc_macs + 1); i++)
2800 be_cmd_pmac_del(adapter, adapter->if_handle,
2801 adapter->pmac_id[i], 0);
2802 adapter->uc_macs = 0;
2803
a323d9bf 2804 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2805 if (msix_enabled(adapter))
2806 synchronize_irq(be_msix_vec_get(adapter, eqo));
2807 else
2808 synchronize_irq(netdev->irq);
2809 be_eq_clean(eqo);
63fcb27f
PR
2810 }
2811
889cd4b2
SP
2812 be_irq_unregister(adapter);
2813
482c9e79
SP
2814 return 0;
2815}
2816
10ef9ab4 2817static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2818{
2819 struct be_rx_obj *rxo;
e9008ee9 2820 int rc, i, j;
e2557877
VD
2821 u8 rss_hkey[RSS_HASH_KEY_LEN];
2822 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2823
2824 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2825 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2826 sizeof(struct be_eth_rx_d));
2827 if (rc)
2828 return rc;
2829 }
2830
2831 /* The FW would like the default RXQ to be created first */
2832 rxo = default_rxo(adapter);
2833 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2834 adapter->if_handle, false, &rxo->rss_id);
2835 if (rc)
2836 return rc;
2837
2838 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2839 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2840 rx_frag_size, adapter->if_handle,
2841 true, &rxo->rss_id);
482c9e79
SP
2842 if (rc)
2843 return rc;
2844 }
2845
2846 if (be_multi_rxq(adapter)) {
e2557877
VD
2847 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2848 j += adapter->num_rx_qs - 1) {
e9008ee9 2849 for_all_rss_queues(adapter, rxo, i) {
e2557877 2850 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2851 break;
e2557877
VD
2852 rss->rsstable[j + i] = rxo->rss_id;
2853 rss->rss_queue[j + i] = i;
e9008ee9
PR
2854 }
2855 }
e2557877
VD
2856 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2857 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2858
2859 if (!BEx_chip(adapter))
e2557877
VD
2860 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2861 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2862 } else {
2863 /* Disable RSS if only the default RX Q is created */
e2557877 2864 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2865 }
594ad54a 2866
e2557877 2867 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2868 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2869 128, rss_hkey);
da1388d6 2870 if (rc) {
e2557877 2871 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2872 return rc;
482c9e79
SP
2873 }
2874
e2557877
VD
2875 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2876
482c9e79 2877 /* First time posting */
10ef9ab4 2878 for_all_rx_queues(adapter, rxo, i)
482c9e79 2879 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2880 return 0;
2881}
2882
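/* Illustrative sketch, not driver code: the nested loop above fills the
 * 128-entry RSS indirection table by cycling through the RSS rings so hash
 * buckets spread evenly. Standalone version (table length and ring count
 * shrunk for readability):
 */
#include <stdio.h>

int main(void)
{
	int table[16], len = 16, num_rss = 3, i, j;

	for (j = 0; j < len; j += num_rss)
		for (i = 0; i < num_rss && j + i < len; i++)
			table[j + i] = i;	/* stand-in for rxo->rss_id */

	for (i = 0; i < len; i++)
		printf("%d ", table[i]);	/* 0 1 2 0 1 2 ... */
	printf("\n");
	return 0;
}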
6b7c5b94
SP
2883static int be_open(struct net_device *netdev)
2884{
2885 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2886 struct be_eq_obj *eqo;
3abcdeda 2887 struct be_rx_obj *rxo;
10ef9ab4 2888 struct be_tx_obj *txo;
b236916a 2889 u8 link_status;
3abcdeda 2890 int status, i;
5fb379ee 2891
10ef9ab4 2892 status = be_rx_qs_create(adapter);
482c9e79
SP
2893 if (status)
2894 goto err;
2895
c2bba3df
SK
2896 status = be_irq_register(adapter);
2897 if (status)
2898 goto err;
5fb379ee 2899
10ef9ab4 2900 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2901 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2902
10ef9ab4
SP
2903 for_all_tx_queues(adapter, txo, i)
2904 be_cq_notify(adapter, txo->cq.id, true, 0);
2905
7a1e9b20
SP
2906 be_async_mcc_enable(adapter);
2907
10ef9ab4
SP
2908 for_all_evt_queues(adapter, eqo, i) {
2909 napi_enable(&eqo->napi);
6384a4d0 2910 be_enable_busy_poll(eqo);
4cad9f3b 2911 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 2912 }
04d3d624 2913 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2914
323ff71e 2915 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2916 if (!status)
2917 be_link_status_update(adapter, link_status);
2918
fba87559 2919 netif_tx_start_all_queues(netdev);
045508a8 2920 be_roce_dev_open(adapter);
c9c47142 2921
c5abe7c0 2922#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
2923 if (skyhawk_chip(adapter))
2924 vxlan_get_rx_port(netdev);
c5abe7c0
SP
2925#endif
2926
889cd4b2
SP
2927 return 0;
2928err:
2929 be_close(adapter->netdev);
2930 return -EIO;
5fb379ee
SP
2931}
2932
71d8d1b5
AK
2933static int be_setup_wol(struct be_adapter *adapter, bool enable)
2934{
2935 struct be_dma_mem cmd;
2936 int status = 0;
2937 u8 mac[ETH_ALEN];
2938
2939 memset(mac, 0, ETH_ALEN);
2940
2941 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
2942 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2943 GFP_KERNEL);
ddf1169f 2944 if (!cmd.va)
6b568689 2945 return -ENOMEM;
71d8d1b5
AK
2946
2947 if (enable) {
2948 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
2949 PCICFG_PM_CONTROL_OFFSET,
2950 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
2951 if (status) {
2952 dev_err(&adapter->pdev->dev,
2381a55c 2953 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2954 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2955 cmd.dma);
71d8d1b5
AK
2956 return status;
2957 }
2958 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
2959 adapter->netdev->dev_addr,
2960 &cmd);
71d8d1b5
AK
2961 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2962 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2963 } else {
2964 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2965 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2966 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2967 }
2968
2b7bcebf 2969 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2970 return status;
2971}
2972
6d87f5c3
AK
2973/*
2974 * Generate a seed MAC address from the PF MAC Address using jhash.
2975 * MAC addresses for VFs are assigned incrementally starting from the seed.
2976 * These addresses are programmed in the ASIC by the PF and the VF driver
2977 * queries for the MAC address during its probe.
2978 */
4c876616 2979static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2980{
f9449ab7 2981 u32 vf;
3abcdeda 2982 int status = 0;
6d87f5c3 2983 u8 mac[ETH_ALEN];
11ac75ed 2984 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2985
2986 be_vf_eth_addr_generate(adapter, mac);
2987
11ac75ed 2988 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 2989 if (BEx_chip(adapter))
590c391d 2990 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2991 vf_cfg->if_handle,
2992 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
2993 else
2994 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2995 vf + 1);
590c391d 2996
6d87f5c3
AK
2997 if (status)
2998 dev_err(&adapter->pdev->dev,
748b539a
SP
2999 "Mac address assignment failed for VF %d\n",
3000 vf);
6d87f5c3 3001 else
11ac75ed 3002 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3003
3004 mac[5] += 1;
3005 }
3006 return status;
3007}
3008
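/* Illustrative sketch, not driver code: VF MACs are derived by bumping the
 * last octet of the seed address (mac[5] += 1 above); the increment wraps
 * past 0xff without carrying into mac[4]. Standalone sketch (the seed below
 * is made up):
 */
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0xfe };
	int vf;

	for (vf = 0; vf < 3; vf++) {
		printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* third MAC wraps ...:fe, ...:ff, ...:00 */
	}
	return 0;
}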
4c876616
SP
3009static int be_vfs_mac_query(struct be_adapter *adapter)
3010{
3011 int status, vf;
3012 u8 mac[ETH_ALEN];
3013 struct be_vf_cfg *vf_cfg;
4c876616
SP
3014
3015 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3016 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3017 mac, vf_cfg->if_handle,
3018 false, vf+1);
4c876616
SP
3019 if (status)
3020 return status;
3021 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3022 }
3023 return 0;
3024}
3025
f9449ab7 3026static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3027{
11ac75ed 3028 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3029 u32 vf;
3030
257a3feb 3031 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3032 dev_warn(&adapter->pdev->dev,
3033 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3034 goto done;
3035 }
3036
b4c1df93
SP
3037 pci_disable_sriov(adapter->pdev);
3038
11ac75ed 3039 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3040 if (BEx_chip(adapter))
11ac75ed
SP
3041 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3042 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3043 else
3044 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3045 vf + 1);
f9449ab7 3046
11ac75ed
SP
3047 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3048 }
39f1d94d
SP
3049done:
3050 kfree(adapter->vf_cfg);
3051 adapter->num_vfs = 0;
6d87f5c3
AK
3052}
3053
7707133c
SP
3054static void be_clear_queues(struct be_adapter *adapter)
3055{
3056 be_mcc_queues_destroy(adapter);
3057 be_rx_cqs_destroy(adapter);
3058 be_tx_queues_destroy(adapter);
3059 be_evt_queues_destroy(adapter);
3060}
3061
68d7bdcb 3062static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3063{
191eb756
SP
3064 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3065 cancel_delayed_work_sync(&adapter->work);
3066 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3067 }
68d7bdcb
SP
3068}
3069
b05004ad 3070static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3071{
3072 int i;
3073
b05004ad
SK
3074 if (adapter->pmac_id) {
3075 for (i = 0; i < (adapter->uc_macs + 1); i++)
3076 be_cmd_pmac_del(adapter, adapter->if_handle,
3077 adapter->pmac_id[i], 0);
3078 adapter->uc_macs = 0;
3079
3080 kfree(adapter->pmac_id);
3081 adapter->pmac_id = NULL;
3082 }
3083}
3084
c5abe7c0 3085#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3086static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3087{
3088 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3089 be_cmd_manage_iface(adapter, adapter->if_handle,
3090 OP_CONVERT_TUNNEL_TO_NORMAL);
3091
3092 if (adapter->vxlan_port)
3093 be_cmd_set_vxlan_port(adapter, 0);
3094
3095 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3096 adapter->vxlan_port = 0;
3097}
c5abe7c0 3098#endif
c9c47142 3099
b05004ad
SK
3100static int be_clear(struct be_adapter *adapter)
3101{
68d7bdcb 3102 be_cancel_worker(adapter);
191eb756 3103
11ac75ed 3104 if (sriov_enabled(adapter))
f9449ab7
SP
3105 be_vf_clear(adapter);
3106
bec84e6b
VV
3107 /* Re-configure FW to distribute resources evenly across max-supported
3108 * number of VFs, only when VFs are not already enabled.
3109 */
3110 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3111 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3112 pci_sriov_get_totalvfs(adapter->pdev));
3113
c5abe7c0 3114#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3115 be_disable_vxlan_offloads(adapter);
c5abe7c0 3116#endif
2d17f403 3117 /* delete the primary mac along with the uc-mac list */
b05004ad 3118 be_mac_clear(adapter);
fbc13f01 3119
f9449ab7 3120 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3121
7707133c 3122 be_clear_queues(adapter);
a54769f5 3123
10ef9ab4 3124 be_msix_disable(adapter);
e1ad8e33 3125 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3126 return 0;
3127}
3128
4c876616 3129static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3130{
92bf14ab 3131 struct be_resources res = {0};
4c876616
SP
3132 struct be_vf_cfg *vf_cfg;
3133 u32 cap_flags, en_flags, vf;
922bbe88 3134 int status = 0;
abb93951 3135
4c876616
SP
3136 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3137 BE_IF_FLAGS_MULTICAST;
abb93951 3138
4c876616 3139 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3140 if (!BE3_chip(adapter)) {
3141 status = be_cmd_get_profile_config(adapter, &res,
3142 vf + 1);
3143 if (!status)
3144 cap_flags = res.if_cap_flags;
3145 }
4c876616
SP
3146
3147 /* If a FW profile exists, then cap_flags are updated */
3148 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3149 BE_IF_FLAGS_BROADCAST |
3150 BE_IF_FLAGS_MULTICAST);
3151 status =
3152 be_cmd_if_create(adapter, cap_flags, en_flags,
3153 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3154 if (status)
3155 goto err;
3156 }
3157err:
3158 return status;
abb93951
PR
3159}
3160
39f1d94d 3161static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3162{
11ac75ed 3163 struct be_vf_cfg *vf_cfg;
30128031
SP
3164 int vf;
3165
39f1d94d
SP
3166 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3167 GFP_KERNEL);
3168 if (!adapter->vf_cfg)
3169 return -ENOMEM;
3170
11ac75ed
SP
3171 for_all_vfs(adapter, vf_cfg, vf) {
3172 vf_cfg->if_handle = -1;
3173 vf_cfg->pmac_id = -1;
30128031 3174 }
39f1d94d 3175 return 0;
30128031
SP
3176}
3177
f9449ab7
SP
3178static int be_vf_setup(struct be_adapter *adapter)
3179{
c502224e 3180 struct device *dev = &adapter->pdev->dev;
11ac75ed 3181 struct be_vf_cfg *vf_cfg;
4c876616 3182 int status, old_vfs, vf;
04a06028 3183 u32 privileges;
39f1d94d 3184
257a3feb 3185 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3186
3187 status = be_vf_setup_init(adapter);
3188 if (status)
3189 goto err;
30128031 3190
4c876616
SP
3191 if (old_vfs) {
3192 for_all_vfs(adapter, vf_cfg, vf) {
3193 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3194 if (status)
3195 goto err;
3196 }
f9449ab7 3197
4c876616
SP
3198 status = be_vfs_mac_query(adapter);
3199 if (status)
3200 goto err;
3201 } else {
bec84e6b
VV
3202 status = be_vfs_if_create(adapter);
3203 if (status)
3204 goto err;
3205
39f1d94d
SP
3206 status = be_vf_eth_addr_config(adapter);
3207 if (status)
3208 goto err;
3209 }
f9449ab7 3210
11ac75ed 3211 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3212 /* Allow VFs to program MAC/VLAN filters */
3213 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3214 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3215 status = be_cmd_set_fn_privileges(adapter,
3216 privileges |
3217 BE_PRIV_FILTMGMT,
3218 vf + 1);
3219 if (!status)
3220 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3221 vf);
3222 }
3223
0f77ba73
RN
3224 /* Allow full available bandwidth */
3225 if (!old_vfs)
3226 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3227
bdce2ad7 3228 if (!old_vfs) {
0599863d 3229 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3230 be_cmd_set_logical_link_config(adapter,
3231 IFLA_VF_LINK_STATE_AUTO,
3232 vf+1);
3233 }
f9449ab7 3234 }
b4c1df93
SP
3235
3236 if (!old_vfs) {
3237 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3238 if (status) {
3239 dev_err(dev, "SRIOV enable failed\n");
3240 adapter->num_vfs = 0;
3241 goto err;
3242 }
3243 }
f9449ab7
SP
3244 return 0;
3245err:
4c876616
SP
3246 dev_err(dev, "VF setup failed\n");
3247 be_vf_clear(adapter);
f9449ab7
SP
3248 return status;
3249}
3250
f93f160b
VV
3251/* Converting function_mode bits on BE3 to SH mc_type enums */
3252
3253static u8 be_convert_mc_type(u32 function_mode)
3254{
66064dbc 3255 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3256 return vNIC1;
66064dbc 3257 else if (function_mode & QNQ_MODE)
f93f160b
VV
3258 return FLEX10;
3259 else if (function_mode & VNIC_MODE)
3260 return vNIC2;
3261 else if (function_mode & UMC_ENABLED)
3262 return UMC;
3263 else
3264 return MC_NONE;
3265}
3266
92bf14ab
SP
3267/* On BE2/BE3 FW does not suggest the supported limits */
3268static void BEx_get_resources(struct be_adapter *adapter,
3269 struct be_resources *res)
3270{
bec84e6b 3271 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
3272
3273 if (be_physfn(adapter))
3274 res->max_uc_mac = BE_UC_PMAC_COUNT;
3275 else
3276 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3277
f93f160b
VV
3278 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3279
3280 if (be_is_mc(adapter)) {
3281 /* Assuming that there are 4 channels per port
3282 * when multi-channel is enabled
3283 */
3284 if (be_is_qnq_mode(adapter))
3285 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3286 else
3287 /* In a non-qnq multichannel mode, the pvid
3288 * takes up one vlan entry
3289 */
3290 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3291 } else {
92bf14ab 3292 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3293 }
3294
92bf14ab
SP
3295 res->max_mcast_mac = BE_MAX_MC;
3296
a5243dab
VV
3297 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3298 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3299 * *only* if it is RSS-capable.
3300 */
3301 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3302 !be_physfn(adapter) || (be_is_mc(adapter) &&
3303 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
92bf14ab
SP
3304 res->max_tx_qs = 1;
3305 else
3306 res->max_tx_qs = BE3_MAX_TX_QS;
3307
3308 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3309 !use_sriov && be_physfn(adapter))
3310 res->max_rss_qs = (adapter->be3_native) ?
3311 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3312 res->max_rx_qs = res->max_rss_qs + 1;
3313
e3dc867c 3314 if (be_physfn(adapter))
d3518e21 3315 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3316 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3317 else
3318 res->max_evt_qs = 1;
92bf14ab
SP
3319
3320 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3321 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3322 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3323}
3324
30128031
SP
3325static void be_setup_init(struct be_adapter *adapter)
3326{
3327 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3328 adapter->phy.link_speed = -1;
30128031
SP
3329 adapter->if_handle = -1;
3330 adapter->be3_native = false;
3331 adapter->promiscuous = false;
f25b119c
PR
3332 if (be_physfn(adapter))
3333 adapter->cmd_privileges = MAX_PRIVILEGES;
3334 else
3335 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3336}
3337
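/* Query the PF-pool resource limits via GET_PROFILE_CONFIG, work around
 * old BE3 FW that does not report max_vfs, and validate the num_vfs
 * module parameter: if VFs are already enabled that count wins,
 * otherwise num_vfs is capped at be_max_vfs().
 */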
bec84e6b
VV
3338static int be_get_sriov_config(struct be_adapter *adapter)
3339{
3340 struct device *dev = &adapter->pdev->dev;
3341 struct be_resources res = {0};
3342 int status, max_vfs, old_vfs;
3343
3344 status = be_cmd_get_profile_config(adapter, &res, 0);
3345 if (status)
3346 return status;
3347
3348 adapter->pool_res = res;
3349
3350 /* Some old versions of BE3 FW don't report max_vfs value */
3351 if (BE3_chip(adapter) && !res.max_vfs) {
3352 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3353 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3354 }
3355
3356 adapter->pool_res.max_vfs = res.max_vfs;
3357 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3358
3359 if (!be_max_vfs(adapter)) {
3360 if (num_vfs)
3361 dev_warn(dev, "device doesn't support SRIOV\n");
3362 adapter->num_vfs = 0;
3363 return 0;
3364 }
3365
3366 /* validate num_vfs module param */
3367 old_vfs = pci_num_vf(adapter->pdev);
3368 if (old_vfs) {
3369 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3370 if (old_vfs != num_vfs)
3371 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3372 adapter->num_vfs = old_vfs;
3373 } else {
3374 if (num_vfs > be_max_vfs(adapter)) {
3375 dev_info(dev, "Resources unavailable to init %d VFs\n",
3376 num_vfs);
3377 dev_info(dev, "Limiting to %d VFs\n",
3378 be_max_vfs(adapter));
3379 }
3380 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3381 }
3382
3383 return 0;
3384}
3385
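/* On BE2/BE3 the limits are derived by the driver (BEx_get_resources);
 * on Lancer/Skyhawk they are queried from FW, with half the EQs set
 * aside when RoCE may be enabled.
 */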
92bf14ab 3386static int be_get_resources(struct be_adapter *adapter)
abb93951 3387{
92bf14ab
SP
3388 struct device *dev = &adapter->pdev->dev;
3389 struct be_resources res = {0};
3390 int status;
abb93951 3391
92bf14ab
SP
3392 if (BEx_chip(adapter)) {
3393 BEx_get_resources(adapter, &res);
3394 adapter->res = res;
abb93951
PR
3395 }
3396
92bf14ab
SP
3397 /* For Lancer, SH etc., read per-function resource limits from FW.
3398 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3399 * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
3400 */
3401 if (!BEx_chip(adapter)) {
3402 status = be_cmd_get_func_config(adapter, &res);
3403 if (status)
3404 return status;
abb93951 3405
92bf14ab
SP
3406 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3407 if (be_roce_supported(adapter))
3408 res.max_evt_qs /= 2;
3409 adapter->res = res;
abb93951 3410
92bf14ab
SP
3411 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3412 be_max_txqs(adapter), be_max_rxqs(adapter),
3413 be_max_rss(adapter), be_max_eqs(adapter),
3414 be_max_vfs(adapter));
3415 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3416 be_max_uc(adapter), be_max_mc(adapter),
3417 be_max_vlans(adapter));
abb93951 3418 }
4c876616 3419
92bf14ab 3420 return 0;
abb93951
PR
3421}
3422
39f1d94d
SP
3423static int be_get_config(struct be_adapter *adapter)
3424{
542963b7 3425 u16 profile_id;
4c876616 3426 int status;
39f1d94d 3427
e97e3cda 3428 status = be_cmd_query_fw_cfg(adapter);
abb93951 3429 if (status)
92bf14ab 3430 return status;
abb93951 3431
542963b7
VV
3432 if (be_physfn(adapter)) {
3433 status = be_cmd_get_active_profile(adapter, &profile_id);
3434 if (!status)
3435 dev_info(&adapter->pdev->dev,
3436 "Using profile 0x%x\n", profile_id);
bec84e6b
VV
3437
3438 status = be_get_sriov_config(adapter);
3439 if (status)
3440 return status;
3441
3442 /* When the HW is in SRIOV capable configuration, the PF-pool
3443 * resources are equally distributed across the max-number of
3444 * VFs. The user may request only a subset of the max-vfs to be
3445 * enabled. Based on num_vfs, redistribute the resources across
3446 * num_vfs so that each VF gets access to a larger share of
3447 * resources. This facility is not available in BE3 FW.
3448 * Also, this is done by FW in Lancer chip.
3449 */
3450 if (!pci_num_vf(adapter->pdev)) {
3451 status = be_cmd_set_sriov_config(adapter,
3452 adapter->pool_res,
3453 adapter->num_vfs);
3454 if (status)
3455 return status;
3456 }
542963b7
VV
3457 }
3458
92bf14ab
SP
3459 status = be_get_resources(adapter);
3460 if (status)
3461 return status;
abb93951 3462
46ee9c14
RN
3463 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3464 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3465 if (!adapter->pmac_id)
3466 return -ENOMEM;
abb93951 3467
92bf14ab
SP
3468 /* Sanitize cfg_num_qs based on HW and platform limits */
3469 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3470
3471 return 0;
39f1d94d
SP
3472}
3473
95046b92
SP
3474static int be_mac_setup(struct be_adapter *adapter)
3475{
3476 u8 mac[ETH_ALEN];
3477 int status;
3478
3479 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3480 status = be_cmd_get_perm_mac(adapter, mac);
3481 if (status)
3482 return status;
3483
3484 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3485 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3486 } else {
3487 /* Maybe the HW was reset; dev_addr must be re-programmed */
3488 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3489 }
3490
2c7a9dc1
AK
3491 /* For BE3-R VFs, the PF programs the initial MAC address */
3492 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3493 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3494 &adapter->pmac_id[0], 0);
95046b92
SP
3495 return 0;
3496}
3497
68d7bdcb
SP
3498static void be_schedule_worker(struct be_adapter *adapter)
3499{
3500 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3501 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3502}
3503
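/* Create the event queues, TX queues, RX completion queues and MCC
 * queues, then publish the actual RX/TX queue counts to the stack
 * (see the rtnl_lock() note in be_setup() for the
 * netif_set_real_num_*_queues() calls).
 */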
7707133c 3504static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3505{
68d7bdcb 3506 struct net_device *netdev = adapter->netdev;
10ef9ab4 3507 int status;
ba343c77 3508
7707133c 3509 status = be_evt_queues_create(adapter);
abb93951
PR
3510 if (status)
3511 goto err;
73d540f2 3512
7707133c 3513 status = be_tx_qs_create(adapter);
c2bba3df
SK
3514 if (status)
3515 goto err;
10ef9ab4 3516
7707133c 3517 status = be_rx_cqs_create(adapter);
10ef9ab4 3518 if (status)
a54769f5 3519 goto err;
6b7c5b94 3520
7707133c 3521 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3522 if (status)
3523 goto err;
3524
68d7bdcb
SP
3525 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3526 if (status)
3527 goto err;
3528
3529 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3530 if (status)
3531 goto err;
3532
7707133c
SP
3533 return 0;
3534err:
3535 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3536 return status;
3537}
3538
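/* Re-create all queues after a queue-count or EQ change: close the
 * netdev if running, stop the worker, tear the queues down (leaving
 * MSI-X alone if vectors are shared with RoCE), then rebuild and
 * reopen.
 */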
68d7bdcb
SP
3539int be_update_queues(struct be_adapter *adapter)
3540{
3541 struct net_device *netdev = adapter->netdev;
3542 int status;
3543
3544 if (netif_running(netdev))
3545 be_close(netdev);
3546
3547 be_cancel_worker(adapter);
3548
3549 /* If any vectors have been shared with RoCE we cannot re-program
3550 * the MSIx table.
3551 */
3552 if (!adapter->num_msix_roce_vec)
3553 be_msix_disable(adapter);
3554
3555 be_clear_queues(adapter);
3556
3557 if (!msix_enabled(adapter)) {
3558 status = be_msix_enable(adapter);
3559 if (status)
3560 return status;
3561 }
3562
3563 status = be_setup_queues(adapter);
3564 if (status)
3565 return status;
3566
3567 be_schedule_worker(adapter);
3568
3569 if (netif_running(netdev))
3570 status = be_open(netdev);
3571
3572 return status;
3573}
3574
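/* Main (re)initialization path: query FW config and resources, enable
 * MSI-X, create the interface and queues, program the MAC, restore
 * VLAN/RX-mode/flow-control settings, set up VFs if requested and
 * start the worker.
 */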
7707133c
SP
3575static int be_setup(struct be_adapter *adapter)
3576{
3577 struct device *dev = &adapter->pdev->dev;
3578 u32 tx_fc, rx_fc, en_flags;
3579 int status;
3580
3581 be_setup_init(adapter);
3582
3583 if (!lancer_chip(adapter))
3584 be_cmd_req_native_mode(adapter);
3585
3586 status = be_get_config(adapter);
10ef9ab4 3587 if (status)
a54769f5 3588 goto err;
6b7c5b94 3589
7707133c 3590 status = be_msix_enable(adapter);
10ef9ab4 3591 if (status)
a54769f5 3592 goto err;
6b7c5b94 3593
f9449ab7 3594 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
7707133c 3595 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3596 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3597 en_flags |= BE_IF_FLAGS_RSS;
92bf14ab
SP
3598 en_flags = en_flags & be_if_cap_flags(adapter);
3599 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
1578e777 3600 &adapter->if_handle, 0);
7707133c 3601 if (status)
a54769f5 3602 goto err;
6b7c5b94 3603
68d7bdcb
SP
3604 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3605 rtnl_lock();
7707133c 3606 status = be_setup_queues(adapter);
68d7bdcb 3607 rtnl_unlock();
95046b92 3608 if (status)
1578e777
PR
3609 goto err;
3610
7707133c 3611 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3612
3613 status = be_mac_setup(adapter);
10ef9ab4
SP
3614 if (status)
3615 goto err;
3616
e97e3cda 3617 be_cmd_get_fw_ver(adapter);
5a56eb10 3618
e9e2a904
SK
3619 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3620 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3621 adapter->fw_ver);
3622 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3623 }
3624
1d1e9a46 3625 if (adapter->vlans_added)
10329df8 3626 be_vid_config(adapter);
7ab8b0b4 3627
a54769f5 3628 be_set_rx_mode(adapter->netdev);
5fb379ee 3629
76a9e08e
SR
3630 be_cmd_get_acpi_wol_cap(adapter);
3631
ddc3f5cb 3632 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3633
ddc3f5cb
AK
3634 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3635 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3636 adapter->rx_fc);
2dc1deb6 3637
bdce2ad7
SR
3638 if (be_physfn(adapter))
3639 be_cmd_set_logical_link_config(adapter,
3640 IFLA_VF_LINK_STATE_AUTO, 0);
3641
bec84e6b
VV
3642 if (adapter->num_vfs)
3643 be_vf_setup(adapter);
f9449ab7 3644
f25b119c
PR
3645 status = be_cmd_get_phy_info(adapter);
3646 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3647 adapter->phy.fc_autoneg = 1;
3648
68d7bdcb 3649 be_schedule_worker(adapter);
e1ad8e33 3650 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3651 return 0;
a54769f5
SP
3652err:
3653 be_clear(adapter);
3654 return status;
3655}
6b7c5b94 3656
66268739
IV
3657#ifdef CONFIG_NET_POLL_CONTROLLER
3658static void be_netpoll(struct net_device *netdev)
3659{
3660 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3661 struct be_eq_obj *eqo;
66268739
IV
3662 int i;
3663
e49cc34f
SP
3664 for_all_evt_queues(adapter, eqo, i) {
3665 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3666 napi_schedule(&eqo->napi);
3667 }
10ef9ab4
SP
3668
3669 return;
66268739
IV
3670}
3671#endif
3672
96c9b2e4 3673static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3674
306f1348
SP
3675static bool phy_flashing_required(struct be_adapter *adapter)
3676{
42f11cf2
AK
3677 return (adapter->phy.phy_type == TN_8022 &&
3678 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3679}
3680
c165541e
PR
3681static bool is_comp_in_ufi(struct be_adapter *adapter,
3682 struct flash_section_info *fsec, int type)
3683{
3684 int i = 0, img_type = 0;
3685 struct flash_section_info_g2 *fsec_g2 = NULL;
3686
ca34fe38 3687 if (BE2_chip(adapter))
c165541e
PR
3688 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3689
3690 for (i = 0; i < MAX_FLASH_COMP; i++) {
3691 if (fsec_g2)
3692 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3693 else
3694 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3695
3696 if (img_type == type)
3697 return true;
3698 }
3699 return false;
3700
3701}
3702
4188e7df 3703static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3704 int header_size,
3705 const struct firmware *fw)
c165541e
PR
3706{
3707 struct flash_section_info *fsec = NULL;
3708 const u8 *p = fw->data;
3709
3710 p += header_size;
3711 while (p < (fw->data + fw->size)) {
3712 fsec = (struct flash_section_info *)p;
3713 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3714 return fsec;
3715 p += 32;
3716 }
3717 return NULL;
3718}
3719
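/* Read the CRC of the region already in flash via FW and compare it
 * with the last 4 bytes of the image component; on a match the caller
 * can skip flashing that section.
 */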
96c9b2e4
VV
3720static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3721 u32 img_offset, u32 img_size, int hdr_size,
3722 u16 img_optype, bool *crc_match)
3723{
3724 u32 crc_offset;
3725 int status;
3726 u8 crc[4];
3727
3728 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3729 if (status)
3730 return status;
3731
3732 crc_offset = hdr_size + img_offset + img_size - 4;
3733
3734 /* Skip flashing if the CRC of the flashed region matches */
3735 if (!memcmp(crc, p + crc_offset, 4))
3736 *crc_match = true;
3737 else
3738 *crc_match = false;
3739
3740 return status;
3741}
3742
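/* Write one image component to flash in 32KB chunks. Intermediate
 * chunks use the SAVE op; the final chunk uses the FLASH op to commit
 * the component.
 */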
773a2d7c 3743static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3744 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3745{
773a2d7c 3746 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4
VV
3747 u32 total_bytes, flash_op, num_bytes;
3748 int status;
773a2d7c
PR
3749
3750 total_bytes = img_size;
3751 while (total_bytes) {
3752 num_bytes = min_t(u32, 32*1024, total_bytes);
3753
3754 total_bytes -= num_bytes;
3755
3756 if (!total_bytes) {
3757 if (optype == OPTYPE_PHY_FW)
3758 flash_op = FLASHROM_OPER_PHY_FLASH;
3759 else
3760 flash_op = FLASHROM_OPER_FLASH;
3761 } else {
3762 if (optype == OPTYPE_PHY_FW)
3763 flash_op = FLASHROM_OPER_PHY_SAVE;
3764 else
3765 flash_op = FLASHROM_OPER_SAVE;
3766 }
3767
be716446 3768 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3769 img += num_bytes;
3770 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3771 flash_op, num_bytes);
4c60005f 3772 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
3773 optype == OPTYPE_PHY_FW)
3774 break;
3775 else if (status)
773a2d7c 3776 return status;
773a2d7c
PR
3777 }
3778 return 0;
3779}
3780
0ad3157e 3781/* For BE2, BE3 and BE3-R */
ca34fe38 3782static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
3783 const struct firmware *fw,
3784 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 3785{
c165541e 3786 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 3787 struct device *dev = &adapter->pdev->dev;
c165541e 3788 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3789 int status, i, filehdr_size, num_comp;
3790 const struct flash_comp *pflashcomp;
3791 bool crc_match;
3792 const u8 *p;
c165541e
PR
3793
3794 struct flash_comp gen3_flash_types[] = {
3795 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3796 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3797 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3798 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3799 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3800 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3801 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3802 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3803 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3804 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3805 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3806 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3807 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3808 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3809 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3810 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3811 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3812 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3813 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3814 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3815 };
c165541e
PR
3816
3817 struct flash_comp gen2_flash_types[] = {
3818 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3819 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3820 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3821 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3822 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3823 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3824 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3825 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3826 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3827 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3828 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3829 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3830 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3831 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3832 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3833 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3834 };
3835
ca34fe38 3836 if (BE3_chip(adapter)) {
3f0d4560
AK
3837 pflashcomp = gen3_flash_types;
3838 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3839 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3840 } else {
3841 pflashcomp = gen2_flash_types;
3842 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3843 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3844 }
ca34fe38 3845
c165541e
PR
3846 /* Get flash section info */
3847 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3848 if (!fsec) {
96c9b2e4 3849 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
3850 return -1;
3851 }
9fe96934 3852 for (i = 0; i < num_comp; i++) {
c165541e 3853 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3854 continue;
c165541e
PR
3855
3856 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3857 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3858 continue;
3859
773a2d7c
PR
3860 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3861 !phy_flashing_required(adapter))
306f1348 3862 continue;
c165541e 3863
773a2d7c 3864 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
3865 status = be_check_flash_crc(adapter, fw->data,
3866 pflashcomp[i].offset,
3867 pflashcomp[i].size,
3868 filehdr_size +
3869 img_hdrs_size,
3870 OPTYPE_REDBOOT, &crc_match);
3871 if (status) {
3872 dev_err(dev,
3873 "Could not get CRC for 0x%x region\n",
3874 pflashcomp[i].optype);
3875 continue;
3876 }
3877
3878 if (crc_match)
773a2d7c
PR
3879 continue;
3880 }
c165541e 3881
96c9b2e4
VV
3882 p = fw->data + filehdr_size + pflashcomp[i].offset +
3883 img_hdrs_size;
306f1348
SP
3884 if (p + pflashcomp[i].size > fw->data + fw->size)
3885 return -1;
773a2d7c
PR
3886
3887 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
748b539a 3888 pflashcomp[i].size);
773a2d7c 3889 if (status) {
96c9b2e4 3890 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
3891 pflashcomp[i].img_type);
3892 return status;
84517482 3893 }
84517482 3894 }
84517482
AK
3895 return 0;
3896}
3897
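/* Newer UFIs carry an explicit optype in the section entry; legacy
 * entries (optype 0xFFFF) have it derived from the image type instead.
 */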
96c9b2e4
VV
3898static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3899{
3900 u32 img_type = le32_to_cpu(fsec_entry.type);
3901 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3902
3903 if (img_optype != 0xFFFF)
3904 return img_optype;
3905
3906 switch (img_type) {
3907 case IMAGE_FIRMWARE_iSCSI:
3908 img_optype = OPTYPE_ISCSI_ACTIVE;
3909 break;
3910 case IMAGE_BOOT_CODE:
3911 img_optype = OPTYPE_REDBOOT;
3912 break;
3913 case IMAGE_OPTION_ROM_ISCSI:
3914 img_optype = OPTYPE_BIOS;
3915 break;
3916 case IMAGE_OPTION_ROM_PXE:
3917 img_optype = OPTYPE_PXE_BIOS;
3918 break;
3919 case IMAGE_OPTION_ROM_FCoE:
3920 img_optype = OPTYPE_FCOE_BIOS;
3921 break;
3922 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3923 img_optype = OPTYPE_ISCSI_BACKUP;
3924 break;
3925 case IMAGE_NCSI:
3926 img_optype = OPTYPE_NCSI_FW;
3927 break;
3928 case IMAGE_FLASHISM_JUMPVECTOR:
3929 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3930 break;
3931 case IMAGE_FIRMWARE_PHY:
3932 img_optype = OPTYPE_SH_PHY_FW;
3933 break;
3934 case IMAGE_REDBOOT_DIR:
3935 img_optype = OPTYPE_REDBOOT_DIR;
3936 break;
3937 case IMAGE_REDBOOT_CONFIG:
3938 img_optype = OPTYPE_REDBOOT_CONFIG;
3939 break;
3940 case IMAGE_UFI_DIR:
3941 img_optype = OPTYPE_UFI_DIR;
3942 break;
3943 default:
3944 break;
3945 }
3946
3947 return img_optype;
3948}
3949
773a2d7c 3950static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
3951 const struct firmware *fw,
3952 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3953{
773a2d7c 3954 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
96c9b2e4 3955 struct device *dev = &adapter->pdev->dev;
773a2d7c 3956 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
3957 u32 img_offset, img_size, img_type;
3958 int status, i, filehdr_size;
3959 bool crc_match, old_fw_img;
3960 u16 img_optype;
3961 const u8 *p;
773a2d7c
PR
3962
3963 filehdr_size = sizeof(struct flash_file_hdr_g3);
3964 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3965 if (!fsec) {
96c9b2e4 3966 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 3967 return -EINVAL;
773a2d7c
PR
3968 }
3969
3970 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3971 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3972 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
3973 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3974 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3975 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 3976
96c9b2e4 3977 if (img_optype == 0xFFFF)
773a2d7c 3978 continue;
96c9b2e4
VV
3979 /* Don't bother verifying CRC if an old FW image is being
3980 * flashed
3981 */
3982 if (old_fw_img)
3983 goto flash;
3984
3985 status = be_check_flash_crc(adapter, fw->data, img_offset,
3986 img_size, filehdr_size +
3987 img_hdrs_size, img_optype,
3988 &crc_match);
3989 /* The current FW image on the card does not recognize the new
3990 * FLASH op_type. The FW download is partially complete.
3991 * Reboot the server now so that the FW image recognizes the
3992 * new FLASH op_type. To complete the remaining process,
3993 * download the same FW again after the reboot.
3994 */
4c60005f
KA
3995 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
3996 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
96c9b2e4
VV
3997 dev_err(dev, "Flash incomplete. Reset the server\n");
3998 dev_err(dev, "Download FW image again after reset\n");
3999 return -EAGAIN;
4000 } else if (status) {
4001 dev_err(dev, "Could not get CRC for 0x%x region\n",
4002 img_optype);
4003 return -EFAULT;
773a2d7c
PR
4004 }
4005
96c9b2e4
VV
4006 if (crc_match)
4007 continue;
773a2d7c 4008
96c9b2e4
VV
4009flash:
4010 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4011 if (p + img_size > fw->data + fw->size)
4012 return -1;
4013
4014 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
96c9b2e4
VV
4015 /* For old FW images, ignore ILLEGAL_FIELD errors or errors on
4016 * the UFI_DIR region
4017 */
4c60005f
KA
4018 if (old_fw_img &&
4019 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4020 (img_optype == OPTYPE_UFI_DIR &&
4021 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4022 continue;
4023 } else if (status) {
4024 dev_err(dev, "Flashing section type 0x%x failed\n",
4025 img_type);
4026 return -EFAULT;
773a2d7c
PR
4027 }
4028 }
4029 return 0;
3f0d4560
AK
4030}
4031
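/* On Lancer the FW image is streamed to the "/prg" object in 32KB
 * chunks via WRITE_OBJECT, committed with a final zero-length write,
 * and activated either by an adapter reset issued here or by a system
 * reboot, depending on what FW reports.
 */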
485bf569 4032static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4033 const struct firmware *fw)
84517482 4034{
485bf569
SN
4035#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4036#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 4037 struct be_dma_mem flash_cmd;
485bf569
SN
4038 const u8 *data_ptr = NULL;
4039 u8 *dest_image_ptr = NULL;
4040 size_t image_size = 0;
4041 u32 chunk_size = 0;
4042 u32 data_written = 0;
4043 u32 offset = 0;
4044 int status = 0;
4045 u8 add_status = 0;
f67ef7ba 4046 u8 change_status;
84517482 4047
485bf569 4048 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 4049 dev_err(&adapter->pdev->dev,
485bf569
SN
4050 "FW Image not properly aligned. "
4051 "Length must be 4 byte aligned.\n");
4052 status = -EINVAL;
4053 goto lancer_fw_exit;
d9efd2af
SB
4054 }
4055
485bf569
SN
4056 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4057 + LANCER_FW_DOWNLOAD_CHUNK;
4058 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 4059 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
4060 if (!flash_cmd.va) {
4061 status = -ENOMEM;
485bf569
SN
4062 goto lancer_fw_exit;
4063 }
84517482 4064
485bf569
SN
4065 dest_image_ptr = flash_cmd.va +
4066 sizeof(struct lancer_cmd_req_write_object);
4067 image_size = fw->size;
4068 data_ptr = fw->data;
4069
4070 while (image_size) {
4071 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4072
4073 /* Copy the image chunk content. */
4074 memcpy(dest_image_ptr, data_ptr, chunk_size);
4075
4076 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4077 chunk_size, offset,
4078 LANCER_FW_DOWNLOAD_LOCATION,
4079 &data_written, &change_status,
4080 &add_status);
485bf569
SN
4081 if (status)
4082 break;
4083
4084 offset += data_written;
4085 data_ptr += data_written;
4086 image_size -= data_written;
4087 }
4088
4089 if (!status) {
4090 /* Commit the FW written */
4091 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4092 0, offset,
4093 LANCER_FW_DOWNLOAD_LOCATION,
4094 &data_written, &change_status,
4095 &add_status);
485bf569
SN
4096 }
4097
4098 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
748b539a 4099 flash_cmd.dma);
485bf569
SN
4100 if (status) {
4101 dev_err(&adapter->pdev->dev,
4102 "Firmware load error. "
4103 "Status code: 0x%x Additional Status: 0x%x\n",
4104 status, add_status);
4105 goto lancer_fw_exit;
4106 }
4107
f67ef7ba 4108 if (change_status == LANCER_FW_RESET_NEEDED) {
4bebb56a
SK
4109 dev_info(&adapter->pdev->dev,
4110 "Resetting adapter to activate new FW\n");
5c510811
SK
4111 status = lancer_physdev_ctrl(adapter,
4112 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
4113 if (status) {
4114 dev_err(&adapter->pdev->dev,
4115 "Adapter busy for FW reset.\n"
4116 "New FW will not be active.\n");
4117 goto lancer_fw_exit;
4118 }
4119 } else if (change_status != LANCER_NO_RESET_NEEDED) {
748b539a
SP
4120 dev_err(&adapter->pdev->dev,
4121 "System reboot required for new FW to be active\n");
f67ef7ba
PR
4122 }
4123
485bf569
SN
4124 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4125lancer_fw_exit:
4126 return status;
4127}
4128
ca34fe38
SP
4129#define UFI_TYPE2 2
4130#define UFI_TYPE3 3
0ad3157e 4131#define UFI_TYPE3R 10
ca34fe38
SP
4132#define UFI_TYPE4 4
4133static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4134 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4135{
ddf1169f 4136 if (!fhdr)
773a2d7c
PR
4137 goto be_get_ufi_exit;
4138
ca34fe38
SP
4139 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4140 return UFI_TYPE4;
0ad3157e
VV
4141 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4142 if (fhdr->asic_type_rev == 0x10)
4143 return UFI_TYPE3R;
4144 else
4145 return UFI_TYPE3;
4146 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4147 return UFI_TYPE2;
773a2d7c
PR
4148
4149be_get_ufi_exit:
4150 dev_err(&adapter->pdev->dev,
4151 "UFI and Interface are not compatible for flashing\n");
4152 return -1;
4153}
4154
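/* Non-Lancer download: parse the UFI header, determine the UFI type
 * from the build tag and ASIC revision, and dispatch to the BEx or
 * Skyhawk flashing routine; incompatible combinations are rejected.
 */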
485bf569
SN
4155static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4156{
485bf569
SN
4157 struct flash_file_hdr_g3 *fhdr3;
4158 struct image_hdr *img_hdr_ptr = NULL;
4159 struct be_dma_mem flash_cmd;
4160 const u8 *p;
773a2d7c 4161 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 4162
be716446 4163 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
4164 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4165 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
4166 if (!flash_cmd.va) {
4167 status = -ENOMEM;
485bf569 4168 goto be_fw_exit;
84517482
AK
4169 }
4170
773a2d7c 4171 p = fw->data;
0ad3157e 4172 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 4173
0ad3157e 4174 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 4175
773a2d7c
PR
4176 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4177 for (i = 0; i < num_imgs; i++) {
4178 img_hdr_ptr = (struct image_hdr *)(fw->data +
4179 (sizeof(struct flash_file_hdr_g3) +
4180 i * sizeof(struct image_hdr)));
4181 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
4182 switch (ufi_type) {
4183 case UFI_TYPE4:
773a2d7c 4184 status = be_flash_skyhawk(adapter, fw,
748b539a 4185 &flash_cmd, num_imgs);
0ad3157e
VV
4186 break;
4187 case UFI_TYPE3R:
ca34fe38
SP
4188 status = be_flash_BEx(adapter, fw, &flash_cmd,
4189 num_imgs);
0ad3157e
VV
4190 break;
4191 case UFI_TYPE3:
4192 /* Do not flash this ufi on BE3-R cards */
4193 if (adapter->asic_rev < 0x10)
4194 status = be_flash_BEx(adapter, fw,
4195 &flash_cmd,
4196 num_imgs);
4197 else {
56ace3a0 4198 status = -EINVAL;
0ad3157e
VV
4199 dev_err(&adapter->pdev->dev,
4200 "Can't load BE3 UFI on BE3R\n");
4201 }
4202 }
3f0d4560 4203 }
773a2d7c
PR
4204 }
4205
ca34fe38
SP
4206 if (ufi_type == UFI_TYPE2)
4207 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 4208 else if (ufi_type == -1)
56ace3a0 4209 status = -EINVAL;
84517482 4210
2b7bcebf
IV
4211 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4212 flash_cmd.dma);
84517482
AK
4213 if (status) {
4214 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 4215 goto be_fw_exit;
84517482
AK
4216 }
4217
af901ca1 4218 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 4219
485bf569
SN
4220be_fw_exit:
4221 return status;
4222}
4223
4224int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4225{
4226 const struct firmware *fw;
4227 int status;
4228
4229 if (!netif_running(adapter->netdev)) {
4230 dev_err(&adapter->pdev->dev,
4231 "Firmware load not allowed (interface is down)\n");
940a3fcd 4232 return -ENETDOWN;
485bf569
SN
4233 }
4234
4235 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4236 if (status)
4237 goto fw_exit;
4238
4239 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4240
4241 if (lancer_chip(adapter))
4242 status = lancer_fw_download(adapter, fw);
4243 else
4244 status = be_fw_download(adapter, fw);
4245
eeb65ced 4246 if (!status)
e97e3cda 4247 be_cmd_get_fw_ver(adapter);
eeb65ced 4248
84517482
AK
4249fw_exit:
4250 release_firmware(fw);
4251 return status;
4252}
4253
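/* ndo_bridge_setlink: parse the IFLA_BRIDGE_MODE attribute and program
 * the e-switch for VEB or VEPA forwarding via SET_HSW_CONFIG.
 */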
748b539a 4254static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4255{
4256 struct be_adapter *adapter = netdev_priv(dev);
4257 struct nlattr *attr, *br_spec;
4258 int rem;
4259 int status = 0;
4260 u16 mode = 0;
4261
4262 if (!sriov_enabled(adapter))
4263 return -EOPNOTSUPP;
4264
4265 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4266
4267 nla_for_each_nested(attr, br_spec, rem) {
4268 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4269 continue;
4270
4271 mode = nla_get_u16(attr);
4272 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4273 return -EINVAL;
4274
4275 status = be_cmd_set_hsw_config(adapter, 0, 0,
4276 adapter->if_handle,
4277 mode == BRIDGE_MODE_VEPA ?
4278 PORT_FWD_TYPE_VEPA :
4279 PORT_FWD_TYPE_VEB);
4280 if (status)
4281 goto err;
4282
4283 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4284 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4285
4286 return status;
4287 }
4288err:
4289 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4290 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4291
4292 return status;
4293}
4294
4295static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4296 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4297{
4298 struct be_adapter *adapter = netdev_priv(dev);
4299 int status = 0;
4300 u8 hsw_mode;
4301
4302 if (!sriov_enabled(adapter))
4303 return 0;
4304
4305 /* BE and Lancer chips support VEB mode only */
4306 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4307 hsw_mode = PORT_FWD_TYPE_VEB;
4308 } else {
4309 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4310 adapter->if_handle, &hsw_mode);
4311 if (status)
4312 return 0;
4313 }
4314
4315 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4316 hsw_mode == PORT_FWD_TYPE_VEPA ?
4317 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4318}
4319
c5abe7c0 4320#ifdef CONFIG_BE2NET_VXLAN
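/* VxLAN offload: of these chips only Skyhawk supports it, and only for
 * a single UDP port; the interface is converted to tunnel mode and the
 * port programmed into FW.
 */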
c9c47142
SP
4321static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4322 __be16 port)
4323{
4324 struct be_adapter *adapter = netdev_priv(netdev);
4325 struct device *dev = &adapter->pdev->dev;
4326 int status;
4327
4328 if (lancer_chip(adapter) || BEx_chip(adapter))
4329 return;
4330
4331 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4332 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4333 be16_to_cpu(port));
4334 dev_info(dev,
4335 "Only one UDP port supported for VxLAN offloads\n");
4336 return;
4337 }
4338
4339 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4340 OP_CONVERT_NORMAL_TO_TUNNEL);
4341 if (status) {
4342 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4343 goto err;
4344 }
4345
4346 status = be_cmd_set_vxlan_port(adapter, port);
4347 if (status) {
4348 dev_warn(dev, "Failed to add VxLAN port\n");
4349 goto err;
4350 }
4351 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4352 adapter->vxlan_port = port;
4353
4354 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4355 be16_to_cpu(port));
4356 return;
4357err:
4358 be_disable_vxlan_offloads(adapter);
4359 return;
4360}
4361
4362static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4363 __be16 port)
4364{
4365 struct be_adapter *adapter = netdev_priv(netdev);
4366
4367 if (lancer_chip(adapter) || BEx_chip(adapter))
4368 return;
4369
4370 if (adapter->vxlan_port != port)
4371 return;
4372
4373 be_disable_vxlan_offloads(adapter);
4374
4375 dev_info(&adapter->pdev->dev,
4376 "Disabled VxLAN offloads for UDP port %d\n",
4377 be16_to_cpu(port));
4378}
c5abe7c0 4379#endif
c9c47142 4380
e5686ad8 4381static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4382 .ndo_open = be_open,
4383 .ndo_stop = be_close,
4384 .ndo_start_xmit = be_xmit,
a54769f5 4385 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4386 .ndo_set_mac_address = be_mac_addr_set,
4387 .ndo_change_mtu = be_change_mtu,
ab1594e9 4388 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4389 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4390 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4391 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4392 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4393 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4394 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4395 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4396 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4397#ifdef CONFIG_NET_POLL_CONTROLLER
4398 .ndo_poll_controller = be_netpoll,
4399#endif
a77dcb8c
AK
4400 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4401 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4402#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4403 .ndo_busy_poll = be_busy_poll,
6384a4d0 4404#endif
c5abe7c0 4405#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4406 .ndo_add_vxlan_port = be_add_vxlan_port,
4407 .ndo_del_vxlan_port = be_del_vxlan_port,
c5abe7c0 4408#endif
6b7c5b94
SP
4409};
4410
4411static void be_netdev_init(struct net_device *netdev)
4412{
4413 struct be_adapter *adapter = netdev_priv(netdev);
4414
c9c47142
SP
4415 if (skyhawk_chip(adapter)) {
4416 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4417 NETIF_F_TSO | NETIF_F_TSO6 |
4418 NETIF_F_GSO_UDP_TUNNEL;
4419 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4420 }
6332c8d3 4421 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4422 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4423 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4424 if (be_multi_rxq(adapter))
4425 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4426
4427 netdev->features |= netdev->hw_features |
f646968f 4428 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4429
eb8a50d9 4430 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4431 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4432
fbc13f01
AK
4433 netdev->priv_flags |= IFF_UNICAST_FLT;
4434
6b7c5b94
SP
4435 netdev->flags |= IFF_MULTICAST;
4436
b7e5887e 4437 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4438
10ef9ab4 4439 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4440
7ad24ea4 4441 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4442}
4443
4444static void be_unmap_pci_bars(struct be_adapter *adapter)
4445{
c5b3ad4c
SP
4446 if (adapter->csr)
4447 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4448 if (adapter->db)
ce66f781 4449 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4450}
4451
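/* Doorbells live in BAR 0 on Lancer and on VFs, and in BAR 4 on
 * BE/Skyhawk PFs.
 */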
ce66f781
SP
4452static int db_bar(struct be_adapter *adapter)
4453{
4454 if (lancer_chip(adapter) || !be_physfn(adapter))
4455 return 0;
4456 else
4457 return 4;
4458}
4459
4460static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4461{
dbf0f2a7 4462 if (skyhawk_chip(adapter)) {
ce66f781
SP
4463 adapter->roce_db.size = 4096;
4464 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4465 db_bar(adapter));
4466 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4467 db_bar(adapter));
4468 }
045508a8 4469 return 0;
6b7c5b94
SP
4470}
4471
4472static int be_map_pci_bars(struct be_adapter *adapter)
4473{
4474 u8 __iomem *addr;
fe6d2a38 4475
c5b3ad4c
SP
4476 if (BEx_chip(adapter) && be_physfn(adapter)) {
4477 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4478 if (!adapter->csr)
c5b3ad4c
SP
4479 return -ENOMEM;
4480 }
4481
ce66f781 4482 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4483 if (!addr)
6b7c5b94 4484 goto pci_map_err;
ba343c77 4485 adapter->db = addr;
ce66f781
SP
4486
4487 be_roce_map_pci_bars(adapter);
6b7c5b94 4488 return 0;
ce66f781 4489
6b7c5b94
SP
4490pci_map_err:
4491 be_unmap_pci_bars(adapter);
4492 return -ENOMEM;
4493}
4494
6b7c5b94
SP
4495static void be_ctrl_cleanup(struct be_adapter *adapter)
4496{
8788fdc2 4497 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4498
4499 be_unmap_pci_bars(adapter);
4500
4501 if (mem->va)
2b7bcebf
IV
4502 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4503 mem->dma);
e7b909a6 4504
5b8821b7 4505 mem = &adapter->rx_filter;
e7b909a6 4506 if (mem->va)
2b7bcebf
IV
4507 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4508 mem->dma);
6b7c5b94
SP
4509}
4510
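/* Map the PCI BARs and allocate the DMA memory used for FW commands.
 * The mailbox buffer is over-allocated by 16 bytes so that both its
 * virtual and bus addresses can be aligned to a 16-byte boundary.
 */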
6b7c5b94
SP
4511static int be_ctrl_init(struct be_adapter *adapter)
4512{
8788fdc2
SP
4513 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4514 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4515 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4516 u32 sli_intf;
6b7c5b94 4517 int status;
6b7c5b94 4518
ce66f781
SP
4519 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4520 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4521 SLI_INTF_FAMILY_SHIFT;
4522 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4523
6b7c5b94
SP
4524 status = be_map_pci_bars(adapter);
4525 if (status)
e7b909a6 4526 goto done;
6b7c5b94
SP
4527
4528 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4529 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4530 mbox_mem_alloc->size,
4531 &mbox_mem_alloc->dma,
4532 GFP_KERNEL);
6b7c5b94 4533 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4534 status = -ENOMEM;
4535 goto unmap_pci_bars;
6b7c5b94
SP
4536 }
4537 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4538 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4539 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4540 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4541
5b8821b7 4542 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4543 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4544 rx_filter->size, &rx_filter->dma,
4545 GFP_KERNEL);
ddf1169f 4546 if (!rx_filter->va) {
e7b909a6
SP
4547 status = -ENOMEM;
4548 goto free_mbox;
4549 }
1f9061d2 4550
2984961c 4551 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4552 spin_lock_init(&adapter->mcc_lock);
4553 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4554
5eeff635 4555 init_completion(&adapter->et_cmd_compl);
cf588477 4556 pci_save_state(adapter->pdev);
6b7c5b94 4557 return 0;
e7b909a6
SP
4558
4559free_mbox:
2b7bcebf
IV
4560 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4561 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4562
4563unmap_pci_bars:
4564 be_unmap_pci_bars(adapter);
4565
4566done:
4567 return status;
6b7c5b94
SP
4568}
4569
4570static void be_stats_cleanup(struct be_adapter *adapter)
4571{
3abcdeda 4572 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4573
4574 if (cmd->va)
2b7bcebf
IV
4575 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4576 cmd->va, cmd->dma);
6b7c5b94
SP
4577}
4578
4579static int be_stats_init(struct be_adapter *adapter)
4580{
3abcdeda 4581 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4582
ca34fe38
SP
4583 if (lancer_chip(adapter))
4584 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4585 else if (BE2_chip(adapter))
89a88ab8 4586 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4587 else if (BE3_chip(adapter))
ca34fe38 4588 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4589 else
4590 /* ALL non-BE ASICs */
4591 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4592
ede23fa8
JP
4593 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4594 GFP_KERNEL);
ddf1169f 4595 if (!cmd->va)
6b568689 4596 return -ENOMEM;
6b7c5b94
SP
4597 return 0;
4598}
4599
3bc6b06c 4600static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
4601{
4602 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 4603
6b7c5b94
SP
4604 if (!adapter)
4605 return;
4606
045508a8 4607 be_roce_dev_remove(adapter);
8cef7a78 4608 be_intr_set(adapter, false);
045508a8 4609
f67ef7ba
PR
4610 cancel_delayed_work_sync(&adapter->func_recovery_work);
4611
6b7c5b94
SP
4612 unregister_netdev(adapter->netdev);
4613
5fb379ee
SP
4614 be_clear(adapter);
4615
bf99e50d
PR
4616 /* tell fw we're done with firing cmds */
4617 be_cmd_fw_clean(adapter);
4618
6b7c5b94
SP
4619 be_stats_cleanup(adapter);
4620
4621 be_ctrl_cleanup(adapter);
4622
d6b6d987
SP
4623 pci_disable_pcie_error_reporting(pdev);
4624
6b7c5b94
SP
4625 pci_release_regions(pdev);
4626 pci_disable_device(pdev);
4627
4628 free_netdev(adapter->netdev);
4629}
4630
39f1d94d 4631static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4632{
baaa08d1 4633 int status, level;
6b7c5b94 4634
9e1453c5
AK
4635 status = be_cmd_get_cntl_attributes(adapter);
4636 if (status)
4637 return status;
4638
7aeb2156
PR
4639 /* Must be a power of 2 or else MODULO will BUG_ON */
4640 adapter->be_get_temp_freq = 64;
4641
baaa08d1
VV
4642 if (BEx_chip(adapter)) {
4643 level = be_cmd_get_fw_log_level(adapter);
4644 adapter->msg_enable =
4645 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4646 }
941a77d5 4647
92bf14ab 4648 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4649 return 0;
6b7c5b94
SP
4650}
4651
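/* Lancer error recovery: wait for the adapter to become ready again,
 * tear the function down (be_close/be_clear), clear the recorded error
 * state, then rebuild it with be_setup/be_open.
 */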
f67ef7ba 4652static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4653{
01e5b2c4 4654 struct device *dev = &adapter->pdev->dev;
d8110f62 4655 int status;
d8110f62 4656
f67ef7ba
PR
4657 status = lancer_test_and_set_rdy_state(adapter);
4658 if (status)
4659 goto err;
d8110f62 4660
f67ef7ba
PR
4661 if (netif_running(adapter->netdev))
4662 be_close(adapter->netdev);
d8110f62 4663
f67ef7ba
PR
4664 be_clear(adapter);
4665
01e5b2c4 4666 be_clear_all_error(adapter);
f67ef7ba
PR
4667
4668 status = be_setup(adapter);
4669 if (status)
4670 goto err;
d8110f62 4671
f67ef7ba
PR
4672 if (netif_running(adapter->netdev)) {
4673 status = be_open(adapter->netdev);
d8110f62
PR
4674 if (status)
4675 goto err;
f67ef7ba 4676 }
d8110f62 4677
4bebb56a 4678 dev_err(dev, "Adapter recovery successful\n");
f67ef7ba
PR
4679 return 0;
4680err:
01e5b2c4
SK
4681 if (status == -EAGAIN)
4682 dev_err(dev, "Waiting for resource provisioning\n");
4683 else
4bebb56a 4684 dev_err(dev, "Adapter recovery failed\n");
d8110f62 4685
f67ef7ba
PR
4686 return status;
4687}
4688
4689static void be_func_recovery_task(struct work_struct *work)
4690{
4691 struct be_adapter *adapter =
4692 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4693 int status = 0;
d8110f62 4694
f67ef7ba 4695 be_detect_error(adapter);
d8110f62 4696
f67ef7ba 4697 if (adapter->hw_error && lancer_chip(adapter)) {
d8110f62 4698
f67ef7ba
PR
4699 rtnl_lock();
4700 netif_device_detach(adapter->netdev);
4701 rtnl_unlock();
d8110f62 4702
f67ef7ba 4703 status = lancer_recover_func(adapter);
f67ef7ba
PR
4704 if (!status)
4705 netif_device_attach(adapter->netdev);
d8110f62 4706 }
f67ef7ba 4707
01e5b2c4
SK
4708 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4709 * there is no need to attempt further recovery.
4710 */
4711 if (!status || status == -EAGAIN)
4712 schedule_delayed_work(&adapter->func_recovery_work,
4713 msecs_to_jiffies(1000));
d8110f62
PR
4714}
4715
4716static void be_worker(struct work_struct *work)
4717{
4718 struct be_adapter *adapter =
4719 container_of(work, struct be_adapter, work.work);
4720 struct be_rx_obj *rxo;
4721 int i;
4722
d8110f62
PR
4723 /* when interrupts are not yet enabled, just reap any pending
4724 * mcc completions */
4725 if (!netif_running(adapter->netdev)) {
072a9c48 4726 local_bh_disable();
10ef9ab4 4727 be_process_mcc(adapter);
072a9c48 4728 local_bh_enable();
d8110f62
PR
4729 goto reschedule;
4730 }
4731
4732 if (!adapter->stats_cmd_sent) {
4733 if (lancer_chip(adapter))
4734 lancer_cmd_get_pport_stats(adapter,
4735 &adapter->stats_cmd);
4736 else
4737 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4738 }
4739
d696b5e2
VV
4740 if (be_physfn(adapter) &&
4741 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
7aeb2156
PR
4742 be_cmd_get_die_temperature(adapter);
4743
d8110f62 4744 for_all_rx_queues(adapter, rxo, i) {
6384a4d0
SP
4745 /* Replenish RX-queues starved due to memory
4746 * allocation failures.
4747 */
4748 if (rxo->rx_post_starved)
d8110f62 4749 be_post_rx_frags(rxo, GFP_KERNEL);
d8110f62
PR
4750 }
4751
2632bafd 4752 be_eqd_update(adapter);
10ef9ab4 4753
d8110f62
PR
4754reschedule:
4755 adapter->work_counter++;
4756 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4757}
4758
257a3feb 4759/* If any VFs are already enabled, don't FLR the PF */
39f1d94d
SP
4760static bool be_reset_required(struct be_adapter *adapter)
4761{
257a3feb 4762 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4763}
4764
d379142b
SP
4765static char *mc_name(struct be_adapter *adapter)
4766{
f93f160b
VV
4767 char *str = ""; /* default */
4768
4769 switch (adapter->mc_type) {
4770 case UMC:
4771 str = "UMC";
4772 break;
4773 case FLEX10:
4774 str = "FLEX10";
4775 break;
4776 case vNIC1:
4777 str = "vNIC-1";
4778 break;
4779 case nPAR:
4780 str = "nPAR";
4781 break;
4782 case UFP:
4783 str = "UFP";
4784 break;
4785 case vNIC2:
4786 str = "vNIC-2";
4787 break;
4788 default:
4789 str = "";
4790 }
4791
4792 return str;
d379142b
SP
4793}
4794
4795static inline char *func_name(struct be_adapter *adapter)
4796{
4797 return be_physfn(adapter) ? "PF" : "VF";
4798}
4799
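/* PCI probe: enable the device, map BARs and init the control path,
 * sync with FW readiness (issuing an FLR unless VFs are already
 * enabled), gather the initial config, run be_setup() and register the
 * netdev.
 */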
1dd06ae8 4800static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4801{
4802 int status = 0;
4803 struct be_adapter *adapter;
4804 struct net_device *netdev;
b4e32a71 4805 char port_name;
6b7c5b94
SP
4806
4807 status = pci_enable_device(pdev);
4808 if (status)
4809 goto do_none;
4810
4811 status = pci_request_regions(pdev, DRV_NAME);
4812 if (status)
4813 goto disable_dev;
4814 pci_set_master(pdev);
4815
7f640062 4816 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 4817 if (!netdev) {
6b7c5b94
SP
4818 status = -ENOMEM;
4819 goto rel_reg;
4820 }
4821 adapter = netdev_priv(netdev);
4822 adapter->pdev = pdev;
4823 pci_set_drvdata(pdev, adapter);
4824 adapter->netdev = netdev;
2243e2e9 4825 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4826
4c15c243 4827 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
4828 if (!status) {
4829 netdev->features |= NETIF_F_HIGHDMA;
4830 } else {
4c15c243 4831 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
4832 if (status) {
4833 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4834 goto free_netdev;
4835 }
4836 }
4837
ea58c180
AK
4838 if (be_physfn(adapter)) {
4839 status = pci_enable_pcie_error_reporting(pdev);
4840 if (!status)
4841 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4842 }
d6b6d987 4843
6b7c5b94
SP
4844 status = be_ctrl_init(adapter);
4845 if (status)
39f1d94d 4846 goto free_netdev;
6b7c5b94 4847
2243e2e9 4848 /* sync up with fw's ready state */
ba343c77 4849 if (be_physfn(adapter)) {
bf99e50d 4850 status = be_fw_wait_ready(adapter);
ba343c77
SB
4851 if (status)
4852 goto ctrl_clean;
ba343c77 4853 }
6b7c5b94 4854
39f1d94d
SP
4855 if (be_reset_required(adapter)) {
4856 status = be_cmd_reset_function(adapter);
4857 if (status)
4858 goto ctrl_clean;
556ae191 4859
2d177be8
KA
4860 /* Wait for interrupts to quiesce after an FLR */
4861 msleep(100);
4862 }
8cef7a78
SK
4863
4864 /* Allow interrupts for other ULPs running on NIC function */
4865 be_intr_set(adapter, true);
10ef9ab4 4866
2d177be8
KA
4867 /* tell fw we're ready to fire cmds */
4868 status = be_cmd_fw_init(adapter);
4869 if (status)
4870 goto ctrl_clean;
4871
2243e2e9
SP
4872 status = be_stats_init(adapter);
4873 if (status)
4874 goto ctrl_clean;
4875
39f1d94d 4876 status = be_get_initial_config(adapter);
6b7c5b94
SP
4877 if (status)
4878 goto stats_clean;
6b7c5b94
SP
4879
4880 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4881 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4882 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4883
5fb379ee
SP
4884 status = be_setup(adapter);
4885 if (status)
55f5c3c5 4886 goto stats_clean;
2243e2e9 4887
3abcdeda 4888 be_netdev_init(netdev);
6b7c5b94
SP
4889 status = register_netdev(netdev);
4890 if (status != 0)
5fb379ee 4891 goto unsetup;
6b7c5b94 4892
045508a8
PP
4893 be_roce_dev_add(adapter);
4894
f67ef7ba
PR
4895 schedule_delayed_work(&adapter->func_recovery_work,
4896 msecs_to_jiffies(1000));
b4e32a71
PR
4897
4898 be_cmd_query_port_name(adapter, &port_name);
4899
d379142b
SP
4900 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4901 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4902
6b7c5b94
SP
4903 return 0;
4904
5fb379ee
SP
4905unsetup:
4906 be_clear(adapter);
6b7c5b94
SP
4907stats_clean:
4908 be_stats_cleanup(adapter);
4909ctrl_clean:
4910 be_ctrl_cleanup(adapter);
f9449ab7 4911free_netdev:
fe6d2a38 4912 free_netdev(netdev);
6b7c5b94
SP
4913rel_reg:
4914 pci_release_regions(pdev);
4915disable_dev:
4916 pci_disable_device(pdev);
4917do_none:
c4ca2374 4918 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4919 return status;
4920}
4921
4922static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4923{
4924 struct be_adapter *adapter = pci_get_drvdata(pdev);
4925 struct net_device *netdev = adapter->netdev;
4926
76a9e08e 4927 if (adapter->wol_en)
71d8d1b5
AK
4928 be_setup_wol(adapter, true);
4929
d4360d6f 4930 be_intr_set(adapter, false);
f67ef7ba
PR
4931 cancel_delayed_work_sync(&adapter->func_recovery_work);
4932
6b7c5b94
SP
4933 netif_device_detach(netdev);
4934 if (netif_running(netdev)) {
4935 rtnl_lock();
4936 be_close(netdev);
4937 rtnl_unlock();
4938 }
9b0365f1 4939 be_clear(adapter);
6b7c5b94
SP
4940
4941 pci_save_state(pdev);
4942 pci_disable_device(pdev);
4943 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4944 return 0;
4945}
4946
4947static int be_resume(struct pci_dev *pdev)
4948{
4949 int status = 0;
4950 struct be_adapter *adapter = pci_get_drvdata(pdev);
4951 struct net_device *netdev = adapter->netdev;
4952
4953 netif_device_detach(netdev);
4954
4955 status = pci_enable_device(pdev);
4956 if (status)
4957 return status;
4958
1ca01512 4959 pci_set_power_state(pdev, PCI_D0);
6b7c5b94
SP
4960 pci_restore_state(pdev);
4961
dd5746bf
SB
4962 status = be_fw_wait_ready(adapter);
4963 if (status)
4964 return status;
4965
d4360d6f 4966 be_intr_set(adapter, true);
2243e2e9
SP
4967 /* tell fw we're ready to fire cmds */
4968 status = be_cmd_fw_init(adapter);
4969 if (status)
4970 return status;
4971
9b0365f1 4972 be_setup(adapter);
6b7c5b94
SP
4973 if (netif_running(netdev)) {
4974 rtnl_lock();
4975 be_open(netdev);
4976 rtnl_unlock();
4977 }
f67ef7ba
PR
4978
4979 schedule_delayed_work(&adapter->func_recovery_work,
4980 msecs_to_jiffies(1000));
6b7c5b94 4981 netif_device_attach(netdev);
71d8d1b5 4982
76a9e08e 4983 if (adapter->wol_en)
71d8d1b5 4984 be_setup_wol(adapter, false);
a4ca055f 4985
6b7c5b94
SP
4986 return 0;
4987}
4988
82456b03
SP
4989/*
4990 * An FLR will stop BE from DMAing any data.
4991 */
4992static void be_shutdown(struct pci_dev *pdev)
4993{
4994 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4995
2d5d4154
AK
4996 if (!adapter)
4997 return;
82456b03 4998
0f4a6828 4999 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 5000 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 5001
2d5d4154 5002 netif_device_detach(adapter->netdev);
82456b03 5003
57841869
AK
5004 be_cmd_reset_function(adapter);
5005
82456b03 5006 pci_disable_device(pdev);
82456b03
SP
5007}
5008
cf588477 5009static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5010 pci_channel_state_t state)
cf588477
SP
5011{
5012 struct be_adapter *adapter = pci_get_drvdata(pdev);
5013 struct net_device *netdev = adapter->netdev;
5014
5015 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5016
01e5b2c4
SK
5017 if (!adapter->eeh_error) {
5018 adapter->eeh_error = true;
cf588477 5019
01e5b2c4 5020 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 5021
cf588477 5022 rtnl_lock();
01e5b2c4
SK
5023 netif_device_detach(netdev);
5024 if (netif_running(netdev))
5025 be_close(netdev);
cf588477 5026 rtnl_unlock();
01e5b2c4
SK
5027
5028 be_clear(adapter);
cf588477 5029 }
cf588477
SP
5030
5031 if (state == pci_channel_io_perm_failure)
5032 return PCI_ERS_RESULT_DISCONNECT;
5033
5034 pci_disable_device(pdev);
5035
eeb7fc7b
SK
5036 /* The error could cause the FW to trigger a flash debug dump.
5037 * Resetting the card while flash dump is in progress
c8a54163
PR
5038 * can cause it not to recover; wait for it to finish.
5039 * Wait only for the first function, as it is needed only once per
5040 * adapter.
eeb7fc7b 5041 */
c8a54163
PR
5042 if (pdev->devfn == 0)
5043 ssleep(30);
5044
cf588477
SP
5045 return PCI_ERS_RESULT_NEED_RESET;
5046}
5047
5048static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5049{
5050 struct be_adapter *adapter = pci_get_drvdata(pdev);
5051 int status;
5052
5053 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
5054
5055 status = pci_enable_device(pdev);
5056 if (status)
5057 return PCI_ERS_RESULT_DISCONNECT;
5058
5059 pci_set_master(pdev);
1ca01512 5060 pci_set_power_state(pdev, PCI_D0);
cf588477
SP
5061 pci_restore_state(pdev);
5062
5063 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
5064 dev_info(&adapter->pdev->dev,
5065 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5066 status = be_fw_wait_ready(adapter);
cf588477
SP
5067 if (status)
5068 return PCI_ERS_RESULT_DISCONNECT;
5069
d6b6d987 5070 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 5071 be_clear_all_error(adapter);
cf588477
SP
5072 return PCI_ERS_RESULT_RECOVERED;
5073}
5074
5075static void be_eeh_resume(struct pci_dev *pdev)
5076{
5077 int status = 0;
5078 struct be_adapter *adapter = pci_get_drvdata(pdev);
5079 struct net_device *netdev = adapter->netdev;
5080
5081 dev_info(&adapter->pdev->dev, "EEH resume\n");
5082
5083 pci_save_state(pdev);
5084
2d177be8 5085 status = be_cmd_reset_function(adapter);
cf588477
SP
5086 if (status)
5087 goto err;
5088
03a58baa
KA
5089 /* On some BE3 FW versions, after a HW reset,
5090 * interrupts will remain disabled for each function.
5091 * So, explicitly enable interrupts
5092 */
5093 be_intr_set(adapter, true);
5094
2d177be8
KA
5095 /* tell fw we're ready to fire cmds */
5096 status = be_cmd_fw_init(adapter);
bf99e50d
PR
5097 if (status)
5098 goto err;
5099
cf588477
SP
5100 status = be_setup(adapter);
5101 if (status)
5102 goto err;
5103
5104 if (netif_running(netdev)) {
5105 status = be_open(netdev);
5106 if (status)
5107 goto err;
5108 }
f67ef7ba
PR
5109
5110 schedule_delayed_work(&adapter->func_recovery_work,
5111 msecs_to_jiffies(1000));
cf588477
SP
5112 netif_device_attach(netdev);
5113 return;
5114err:
5115 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
5116}
5117
3646f0e5 5118static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
5119 .error_detected = be_eeh_err_detected,
5120 .slot_reset = be_eeh_reset,
5121 .resume = be_eeh_resume,
5122};
5123
6b7c5b94
SP
5124static struct pci_driver be_driver = {
5125 .name = DRV_NAME,
5126 .id_table = be_dev_ids,
5127 .probe = be_probe,
5128 .remove = be_remove,
5129 .suspend = be_suspend,
cf588477 5130 .resume = be_resume,
82456b03 5131 .shutdown = be_shutdown,
cf588477 5132 .err_handler = &be_eeh_handlers
6b7c5b94
SP
5133};
5134
5135static int __init be_init_module(void)
5136{
8e95a202
JP
5137 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5138 rx_frag_size != 2048) {
6b7c5b94
SP
5139 printk(KERN_WARNING DRV_NAME
5140 " : Module param rx_frag_size must be 2048/4096/8192."
5141 " Using 2048\n");
5142 rx_frag_size = 2048;
5143 }
6b7c5b94
SP
5144
5145 return pci_register_driver(&be_driver);
5146}
5147module_init(be_init_module);
5148
5149static void __exit be_exit_module(void)
5150{
5151 pci_unregister_driver(&be_driver);
5152}
5153module_exit(be_exit_module);