]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/net/ethernet/emulex/benet/be_main.c
be2net: ignore get/set profile FW cmd failures
[mirror_ubuntu-zesty-kernel.git] / drivers / net / ethernet / emulex / benet / be_main.c
CommitLineData
6b7c5b94 1/*
40263820 2 * Copyright (C) 2005 - 2014 Emulex
6b7c5b94
SP
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
d2145cde 11 * linux-drivers@emulex.com
6b7c5b94 12 *
d2145cde
AK
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
6b7c5b94
SP
16 */
17
70c71606 18#include <linux/prefetch.h>
9d9779e7 19#include <linux/module.h>
6b7c5b94 20#include "be.h"
8788fdc2 21#include "be_cmds.h"
65f71b8b 22#include <asm/div64.h>
d6b6d987 23#include <linux/aer.h>
a77dcb8c 24#include <linux/if_bridge.h>
6384a4d0 25#include <net/busy_poll.h>
c9c47142 26#include <net/vxlan.h>
6b7c5b94
SP
27
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
00d3d51e 31MODULE_AUTHOR("Emulex Corporation");
6b7c5b94
SP
32MODULE_LICENSE("GPL");
33
ba343c77 34static unsigned int num_vfs;
ba343c77 35module_param(num_vfs, uint, S_IRUGO);
ba343c77 36MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
6b7c5b94 37
11ac75ed
SP
38static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
6b7c5b94 42static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
c4ca2374 43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
59fd5d87 44 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
c4ca2374
AK
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
fe6d2a38 47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
12f4d0a8 48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
ecedb6ae 49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
76b73530 50 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
6b7c5b94
SP
51 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Descriptions for each bit of the UE (Unrecoverable Error) Status Low CSR;
 * index N names the hardware block reported by bit N.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
/* Descriptions for each bit of the UE Status High CSR; index N names the
 * hardware block reported by bit N.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
6b7c5b94 124
752961a1 125
6b7c5b94
SP
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
1cfafab9 129 if (mem->va) {
2b7bcebf
IV
130 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131 mem->dma);
1cfafab9
SP
132 mem->va = NULL;
133 }
6b7c5b94
SP
134}
135
136static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
748b539a 137 u16 len, u16 entry_size)
6b7c5b94
SP
138{
139 struct be_dma_mem *mem = &q->dma_mem;
140
141 memset(q, 0, sizeof(*q));
142 q->len = len;
143 q->entry_size = entry_size;
144 mem->size = len * entry_size;
ede23fa8
JP
145 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146 GFP_KERNEL);
6b7c5b94 147 if (!mem->va)
10ef9ab4 148 return -ENOMEM;
6b7c5b94
SP
149 return 0;
150}
151
68c45a2d 152static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
6b7c5b94 153{
db3ea781 154 u32 reg, enabled;
5f0b849e 155
db3ea781 156 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
748b539a 157 &reg);
db3ea781
SP
158 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
5f0b849e 160 if (!enabled && enable)
6b7c5b94 161 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 162 else if (enabled && !enable)
6b7c5b94 163 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
5f0b849e 164 else
6b7c5b94 165 return;
5f0b849e 166
db3ea781 167 pci_write_config_dword(adapter->pdev,
748b539a 168 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
6b7c5b94
SP
169}
170
68c45a2d
SK
171static void be_intr_set(struct be_adapter *adapter, bool enable)
172{
173 int status = 0;
174
175 /* On lancer interrupts can't be controlled via this register */
176 if (lancer_chip(adapter))
177 return;
178
179 if (adapter->eeh_error)
180 return;
181
182 status = be_cmd_intr_set(adapter, enable);
183 if (status)
184 be_reg_intr_set(adapter, enable);
185}
186
8788fdc2 187static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
6b7c5b94
SP
188{
189 u32 val = 0;
190 val |= qid & DB_RQ_RING_ID_MASK;
191 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
f3eb62d2
SP
192
193 wmb();
8788fdc2 194 iowrite32(val, adapter->db + DB_RQ_OFFSET);
6b7c5b94
SP
195}
196
94d73aaa
VV
197static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198 u16 posted)
6b7c5b94
SP
199{
200 u32 val = 0;
94d73aaa 201 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
6b7c5b94 202 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
f3eb62d2
SP
203
204 wmb();
94d73aaa 205 iowrite32(val, adapter->db + txo->db_offset);
6b7c5b94
SP
206}
207
8788fdc2 208static void be_eq_notify(struct be_adapter *adapter, u16 qid,
748b539a 209 bool arm, bool clear_int, u16 num_popped)
6b7c5b94
SP
210{
211 u32 val = 0;
212 val |= qid & DB_EQ_RING_ID_MASK;
748b539a 213 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
cf588477 214
f67ef7ba 215 if (adapter->eeh_error)
cf588477
SP
216 return;
217
6b7c5b94
SP
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
8788fdc2 224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
6b7c5b94
SP
225}
226
8788fdc2 227void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
6b7c5b94
SP
228{
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
fe6d2a38
SP
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
cf588477 233
f67ef7ba 234 if (adapter->eeh_error)
cf588477
SP
235 return;
236
6b7c5b94
SP
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
8788fdc2 240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
6b7c5b94
SP
241}
242
6b7c5b94
SP
243static int be_mac_addr_set(struct net_device *netdev, void *p)
244{
245 struct be_adapter *adapter = netdev_priv(netdev);
5a712c13 246 struct device *dev = &adapter->pdev->dev;
6b7c5b94 247 struct sockaddr *addr = p;
5a712c13
SP
248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
6b7c5b94 251
ca9e4988
AK
252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
ff32f8ab
VV
255 /* Proceed further only if, User provided MAC is different
256 * from active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
5a712c13
SP
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
704e4c88 266 */
5a712c13
SP
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
704e4c88
PR
278 }
279
5a712c13
SP
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
704e4c88 282 */
b188f090
SR
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
a65027e4 285 if (status)
e3a7ae2c 286 goto err;
6b7c5b94 287
5a712c13
SP
288 /* The MAC change did not happen, either due to lack of privilege
289 * or PF didn't pre-provision.
290 */
61d23e9f 291 if (!ether_addr_equal(addr->sa_data, mac)) {
5a712c13
SP
292 status = -EPERM;
293 goto err;
294 }
295
e3a7ae2c 296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5a712c13 297 dev_info(dev, "MAC address changed to %pM\n", mac);
e3a7ae2c
SK
298 return 0;
299err:
5a712c13 300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
6b7c5b94
SP
301 return status;
302}
303
ca34fe38
SP
304/* BE2 supports only v0 cmd */
305static void *hw_stats_from_cmd(struct be_adapter *adapter)
306{
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
61000861 311 } else if (BE3_chip(adapter)) {
ca34fe38
SP
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
61000861
AK
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
ca34fe38
SP
318 return &cmd->hw_stats;
319 }
320}
321
322/* BE2 supports only v0 cmd */
323static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324{
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
61000861 329 } else if (BE3_chip(adapter)) {
ca34fe38
SP
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
61000861
AK
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
ca34fe38
SP
336 return &hw_stats->erx;
337 }
338}
339
340static void populate_be_v0_stats(struct be_adapter *adapter)
89a88ab8 341{
ac124ff9
SP
342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
89a88ab8 345 struct be_port_rxf_stats_v0 *port_stats =
ac124ff9
SP
346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 348
ac124ff9 349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
89a88ab8
AK
350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
ac124ff9 364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
89a88ab8
AK
365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
18fb06a1
SR
367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
89a88ab8
AK
370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
ac124ff9 377 drvs->jabber_events = rxf_stats->port1_jabber_events;
89a88ab8 378 else
ac124ff9 379 drvs->jabber_events = rxf_stats->port0_jabber_events;
89a88ab8 380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387}
388
ca34fe38 389static void populate_be_v1_stats(struct be_adapter *adapter)
89a88ab8 390{
ac124ff9
SP
391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
89a88ab8 394 struct be_port_rxf_stats_v1 *port_stats =
ac124ff9
SP
395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
89a88ab8 397
ac124ff9 398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
02fe7027
AK
399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
89a88ab8
AK
401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
18fb06a1 418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
89a88ab8
AK
419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
ac124ff9 421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
89a88ab8
AK
422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
b5adffc4 424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
89a88ab8
AK
425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
89a88ab8 427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
89a88ab8
AK
428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
ac124ff9
SP
430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
89a88ab8
AK
432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433}
434
61000861
AK
435static void populate_be_v2_stats(struct be_adapter *adapter)
436{
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
748b539a 479 if (be_roce_supported(adapter)) {
461ae379
AK
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
61000861
AK
487}
488
005d5696
SX
489static void populate_lancer_stats(struct be_adapter *adapter)
490{
89a88ab8 491
005d5696 492 struct be_drv_stats *drvs = &adapter->drv_stats;
748b539a 493 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
ac124ff9
SP
494
495 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
005d5696 499 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
ac124ff9 500 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
005d5696
SX
501 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505 drvs->rx_dropped_tcp_length =
506 pport_stats->rx_dropped_invalid_tcp_length;
507 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510 drvs->rx_dropped_header_too_small =
511 pport_stats->rx_dropped_header_too_small;
512 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
18fb06a1
SR
513 drvs->rx_address_filtered =
514 pport_stats->rx_address_filtered +
515 pport_stats->rx_vlan_filtered;
ac124ff9 516 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
005d5696 517 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
ac124ff9
SP
518 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
005d5696 520 drvs->jabber_events = pport_stats->rx_jabbers;
ac124ff9
SP
521 drvs->forwarded_packets = pport_stats->num_forwards_lo;
522 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
005d5696 523 drvs->rx_drops_too_many_frags =
ac124ff9 524 pport_stats->rx_drops_too_many_frags_lo;
005d5696 525}
89a88ab8 526
09c1c68f
SP
527static void accumulate_16bit_val(u32 *acc, u16 val)
528{
529#define lo(x) (x & 0xFFFF)
530#define hi(x) (x & 0xFFFF0000)
531 bool wrapped = val < lo(*acc);
532 u32 newacc = hi(*acc) + val;
533
534 if (wrapped)
535 newacc += 65536;
536 ACCESS_ONCE(*acc) = newacc;
537}
538
4188e7df 539static void populate_erx_stats(struct be_adapter *adapter,
748b539a 540 struct be_rx_obj *rxo, u32 erx_stat)
a6c578ef
AK
541{
542 if (!BEx_chip(adapter))
543 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544 else
545 /* below erx HW counter can actually wrap around after
546 * 65535. Driver accumulates a 32-bit value
547 */
548 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549 (u16)erx_stat);
550}
551
89a88ab8
AK
552void be_parse_stats(struct be_adapter *adapter)
553{
61000861 554 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
ac124ff9
SP
555 struct be_rx_obj *rxo;
556 int i;
a6c578ef 557 u32 erx_stat;
ac124ff9 558
ca34fe38
SP
559 if (lancer_chip(adapter)) {
560 populate_lancer_stats(adapter);
005d5696 561 } else {
ca34fe38
SP
562 if (BE2_chip(adapter))
563 populate_be_v0_stats(adapter);
61000861
AK
564 else if (BE3_chip(adapter))
565 /* for BE3 */
ca34fe38 566 populate_be_v1_stats(adapter);
61000861
AK
567 else
568 populate_be_v2_stats(adapter);
d51ebd33 569
61000861 570 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
ca34fe38 571 for_all_rx_queues(adapter, rxo, i) {
a6c578ef
AK
572 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573 populate_erx_stats(adapter, rxo, erx_stat);
ca34fe38 574 }
09c1c68f 575 }
89a88ab8
AK
576}
577
ab1594e9 578static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
748b539a 579 struct rtnl_link_stats64 *stats)
6b7c5b94 580{
ab1594e9 581 struct be_adapter *adapter = netdev_priv(netdev);
89a88ab8 582 struct be_drv_stats *drvs = &adapter->drv_stats;
3abcdeda 583 struct be_rx_obj *rxo;
3c8def97 584 struct be_tx_obj *txo;
ab1594e9
SP
585 u64 pkts, bytes;
586 unsigned int start;
3abcdeda 587 int i;
6b7c5b94 588
3abcdeda 589 for_all_rx_queues(adapter, rxo, i) {
ab1594e9
SP
590 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591 do {
57a7744e 592 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
ab1594e9
SP
593 pkts = rx_stats(rxo)->rx_pkts;
594 bytes = rx_stats(rxo)->rx_bytes;
57a7744e 595 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
ab1594e9
SP
596 stats->rx_packets += pkts;
597 stats->rx_bytes += bytes;
598 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600 rx_stats(rxo)->rx_drops_no_frags;
3abcdeda
SP
601 }
602
3c8def97 603 for_all_tx_queues(adapter, txo, i) {
ab1594e9
SP
604 const struct be_tx_stats *tx_stats = tx_stats(txo);
605 do {
57a7744e 606 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
ab1594e9
SP
607 pkts = tx_stats(txo)->tx_pkts;
608 bytes = tx_stats(txo)->tx_bytes;
57a7744e 609 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
ab1594e9
SP
610 stats->tx_packets += pkts;
611 stats->tx_bytes += bytes;
3c8def97 612 }
6b7c5b94
SP
613
614 /* bad pkts received */
ab1594e9 615 stats->rx_errors = drvs->rx_crc_errors +
89a88ab8
AK
616 drvs->rx_alignment_symbol_errors +
617 drvs->rx_in_range_errors +
618 drvs->rx_out_range_errors +
619 drvs->rx_frame_too_long +
620 drvs->rx_dropped_too_small +
621 drvs->rx_dropped_too_short +
622 drvs->rx_dropped_header_too_small +
623 drvs->rx_dropped_tcp_length +
ab1594e9 624 drvs->rx_dropped_runt;
68110868 625
6b7c5b94 626 /* detailed rx errors */
ab1594e9 627 stats->rx_length_errors = drvs->rx_in_range_errors +
89a88ab8
AK
628 drvs->rx_out_range_errors +
629 drvs->rx_frame_too_long;
68110868 630
ab1594e9 631 stats->rx_crc_errors = drvs->rx_crc_errors;
6b7c5b94
SP
632
633 /* frame alignment errors */
ab1594e9 634 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
68110868 635
6b7c5b94
SP
636 /* receiver fifo overrun */
637 /* drops_no_pbuf is no per i/f, it's per BE card */
ab1594e9 638 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
89a88ab8
AK
639 drvs->rx_input_fifo_overflow_drop +
640 drvs->rx_drops_no_pbuf;
ab1594e9 641 return stats;
6b7c5b94
SP
642}
643
b236916a 644void be_link_status_update(struct be_adapter *adapter, u8 link_status)
6b7c5b94 645{
6b7c5b94
SP
646 struct net_device *netdev = adapter->netdev;
647
b236916a 648 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
ea172a01 649 netif_carrier_off(netdev);
b236916a 650 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
6b7c5b94 651 }
b236916a 652
bdce2ad7 653 if (link_status)
b236916a
AK
654 netif_carrier_on(netdev);
655 else
656 netif_carrier_off(netdev);
6b7c5b94
SP
657}
658
3c8def97 659static void be_tx_stats_update(struct be_tx_obj *txo,
748b539a
SP
660 u32 wrb_cnt, u32 copied, u32 gso_segs,
661 bool stopped)
6b7c5b94 662{
3c8def97
SP
663 struct be_tx_stats *stats = tx_stats(txo);
664
ab1594e9 665 u64_stats_update_begin(&stats->sync);
ac124ff9
SP
666 stats->tx_reqs++;
667 stats->tx_wrbs += wrb_cnt;
668 stats->tx_bytes += copied;
669 stats->tx_pkts += (gso_segs ? gso_segs : 1);
6b7c5b94 670 if (stopped)
ac124ff9 671 stats->tx_stops++;
ab1594e9 672 u64_stats_update_end(&stats->sync);
6b7c5b94
SP
673}
674
675/* Determine number of WRB entries needed to xmit data in an skb */
fe6d2a38 676static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
748b539a 677 bool *dummy)
6b7c5b94 678{
ebc8d2ab
DM
679 int cnt = (skb->len > skb->data_len);
680
681 cnt += skb_shinfo(skb)->nr_frags;
682
6b7c5b94
SP
683 /* to account for hdr wrb */
684 cnt++;
fe6d2a38
SP
685 if (lancer_chip(adapter) || !(cnt & 1)) {
686 *dummy = false;
687 } else {
6b7c5b94
SP
688 /* add a dummy to make it an even num */
689 cnt++;
690 *dummy = true;
fe6d2a38 691 }
6b7c5b94
SP
692 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693 return cnt;
694}
695
696static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697{
698 wrb->frag_pa_hi = upper_32_bits(addr);
699 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
89b1f496 701 wrb->rsvd0 = 0;
6b7c5b94
SP
702}
703
1ded132d 704static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
748b539a 705 struct sk_buff *skb)
1ded132d
AK
706{
707 u8 vlan_prio;
708 u16 vlan_tag;
709
710 vlan_tag = vlan_tx_tag_get(skb);
711 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712 /* If vlan priority provided by OS is NOT in available bmap */
713 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715 adapter->recommended_prio;
716
717 return vlan_tag;
718}
719
c9c47142
SP
720/* Used only for IP tunnel packets */
721static u16 skb_inner_ip_proto(struct sk_buff *skb)
722{
723 return (inner_ip_hdr(skb)->version == 4) ?
724 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725}
726
727static u16 skb_ip_proto(struct sk_buff *skb)
728{
729 return (ip_hdr(skb)->version == 4) ?
730 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731}
732
cc4ce020 733static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
748b539a
SP
734 struct sk_buff *skb, u32 wrb_cnt, u32 len,
735 bool skip_hw_vlan)
6b7c5b94 736{
c9c47142 737 u16 vlan_tag, proto;
cc4ce020 738
6b7c5b94
SP
739 memset(hdr, 0, sizeof(*hdr));
740
741 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
742
49e4b847 743 if (skb_is_gso(skb)) {
6b7c5b94
SP
744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
745 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
746 hdr, skb_shinfo(skb)->gso_size);
fe6d2a38 747 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
49e4b847 748 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
6b7c5b94 749 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
c9c47142
SP
750 if (skb->encapsulation) {
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
752 proto = skb_inner_ip_proto(skb);
753 } else {
754 proto = skb_ip_proto(skb);
755 }
756 if (proto == IPPROTO_TCP)
6b7c5b94 757 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
c9c47142 758 else if (proto == IPPROTO_UDP)
6b7c5b94
SP
759 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
760 }
761
4c5102f9 762 if (vlan_tx_tag_present(skb)) {
6b7c5b94 763 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
1ded132d 764 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
cc4ce020 765 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
6b7c5b94
SP
766 }
767
bc0c3405
AK
768 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
769 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
6b7c5b94 770 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
6b7c5b94
SP
771 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
772 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
773}
774
2b7bcebf 775static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
748b539a 776 bool unmap_single)
7101e111
SP
777{
778 dma_addr_t dma;
779
780 be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
b681ee77 783 if (wrb->frag_len) {
7101e111 784 if (unmap_single)
2b7bcebf
IV
785 dma_unmap_single(dev, dma, wrb->frag_len,
786 DMA_TO_DEVICE);
7101e111 787 else
2b7bcebf 788 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
7101e111
SP
789 }
790}
6b7c5b94 791
/* Build the TX WRB (work request block) chain for @skb in @txq.
 *
 * Layout: one header WRB first, then one data WRB for the skb's linear
 * part (if any), one per page fragment, and an optional dummy WRB to
 * satisfy HW alignment (@dummy_wrb).  @wrb_cnt must already count all of
 * these (see wrb_cnt_for_skb()).
 *
 * Returns the number of data bytes DMA-mapped, or 0 on a DMA mapping
 * failure after unwinding every mapping made so far.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* Reserve the header WRB now; it is filled in last, once the total
	 * copied length is known.
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; rollback point on error */

	/* Linear (non-paged) part of the skb, if present */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* first mapping was dma_map_single() */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* One WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* Zero-length dummy WRB when the HW requires an even WRB count */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* Unwind: rewind to the first data WRB and unmap everything that
	 * was mapped.  Only the very first unmap may be the single-mapped
	 * linear part; map_single is cleared after one pass.
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
857
/* Insert the VLAN tag (and outer QnQ tag, if configured) directly into
 * the packet payload instead of letting the HW tag it.
 *
 * Used by the xmit workaround paths where HW VLAN insertion is buggy.
 * May set *skip_hw_vlan to tell the caller/FW to skip HW tagging.
 * Returns the (possibly re-allocated) skb, or NULL if tagging failed;
 * __vlan_put_tag() frees the skb on failure, so NULL means it is gone.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* Get a private copy if the skb is shared before modifying it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* Tag is now in the payload; clear the out-of-band tci so the
		 * HW does not tag it again.
		 */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
900
bc0c3405
AK
901static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902{
903 struct ethhdr *eh = (struct ethhdr *)skb->data;
904 u16 offset = ETH_HLEN;
905
906 if (eh->h_proto == htons(ETH_P_IPV6)) {
907 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909 offset += sizeof(struct ipv6hdr);
910 if (ip6h->nexthdr != NEXTHDR_TCP &&
911 ip6h->nexthdr != NEXTHDR_UDP) {
912 struct ipv6_opt_hdr *ehdr =
913 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916 if (ehdr->hdrlen == 0xff)
917 return true;
918 }
919 }
920 return false;
921}
922
/* True if this TX skb will end up VLAN-tagged: either the skb carries a
 * tag, or the adapter has a pvid/QnQ vid that would be inserted.
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
927
/* The IPv6 ext-header TX-stall erratum applies only to BE3 chips */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
932
ec495fac
VV
/* Apply BEx/Lancer TX hardware-erratum workarounds to @skb.
 *
 * May trim padding, insert VLAN tags in software, or set *skip_hw_vlan.
 * Returns the (possibly re-allocated) skb, or NULL if the packet was
 * consumed (dropped or freed by a failed tagging attempt).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the padding so the frame length matches the IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* skb already freed by tagging */
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1000
ec495fac
VV
/* Top-level TX workaround dispatcher.  Returns the adjusted skb or NULL
 * when the packet was consumed and must not be transmitted.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			return NULL;	/* skb freed by skb_padto on failure */
		skb->len = 36;
	}

	/* BEx and Lancer need the additional VLAN/padding workarounds */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}
1023
ee9c799c
SP
/* ndo_start_xmit handler: apply HW workarounds, build the WRB chain and
 * ring the TX doorbell.  Always returns NETDEV_TX_OK; dropped packets
 * are counted in tx_drv_drops and freed here.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* header WRB slot; also sent_skb_list index */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;	/* skb consumed by workarounds */
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: rewind the queue head and drop */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1072
1073static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074{
1075 struct be_adapter *adapter = netdev_priv(netdev);
1076 if (new_mtu < BE_MIN_MTU ||
748b539a 1077 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
6b7c5b94 1078 dev_info(&adapter->pdev->dev,
748b539a
SP
1079 "MTU must be between %d and %d bytes\n",
1080 BE_MIN_MTU,
1081 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
6b7c5b94
SP
1082 return -EINVAL;
1083 }
1084 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
748b539a 1085 netdev->mtu, new_mtu);
6b7c5b94
SP
1086 netdev->mtu = new_mtu;
1087 return 0;
1088}
1089
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* More vids than HW filter slots: fall back to vlan promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(&adapter->pdev->dev,
			"Setting HW VLAN filtering failed.\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(&adapter->pdev->dev,
					 "Disabling VLAN Promiscuous mode.\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* Already in vlan promisc: nothing to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(&adapter->pdev->dev,
			"Failed to enable VLAN Promiscuous mode.\n");
	return status;
}
1147
/* ndo_vlan_rx_add_vid handler: record @vid and push the updated VLAN
 * table to HW, rolling back the sw state if the FW command fails.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* Already configured; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* Roll back the sw bookkeeping on FW failure */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1171
/* ndo_vlan_rx_kill_vid handler: drop @vid from the sw table and push the
 * updated VLAN table to HW.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
1185
7ad09458
S
1186static void be_clear_promisc(struct be_adapter *adapter)
1187{
1188 adapter->promiscuous = false;
a0794885 1189 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
7ad09458
S
1190
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192}
1193
/* ndo_set_rx_mode handler: sync the netdev's promisc/allmulti flags and
 * UC/MC address lists to the HW RX filters, falling back to promiscuous
 * modes when the lists exceed what the HW supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* Re-program the VLAN filters lost while in promisc */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Flush all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* Too many UC addresses: fall back to full promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed OK: leave mcast promisc if it was on */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1260
ba343c77
SB
/* ndo_set_vf_mac handler: program @mac as VF @vf's MAC.
 * On BEx the old PMAC entry is deleted and a new one added; newer chips
 * use a single set_mac FW command.  Returns 0 or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the MAC only after the FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1294
/* ndo_get_vf_config handler: report VF @vf's cached config (MAC, VLAN,
 * QoS, TX rate, link state) into @vi.  Returns 0 or a negative errno.
 */
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;	/* min rate is not supported */
	/* vlan_tag packs the 3-bit priority above the 12-bit vid */
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}
1317
/* ndo_set_vf_vlan handler: configure transparent VLAN tagging for VF
 * @vf.  vlan==0 && qos==0 resets transparent tagging.  Returns 0 or a
 * negative errno.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Pack priority into the tag; skip the FW call if unchanged */
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}
1352
ed616689
SC
/* ndo_set_vf_rate handler: set VF @vf's max TX rate in Mbps.
 * min_tx_rate is not supported.  max_tx_rate == 0 clears the limit.
 * A non-zero rate requires link up, must lie in [100, link_speed], and
 * on Skyhawk must be a whole percentage of link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 = no limit; skip validation and program directly */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
bdce2ad7
SR
/* ndo_set_vf_link_state handler: program @link_state (auto/up/down) for
 * VF @vf via the FW and cache it on success.
 */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
e1d18735 1437
2632bafd
SP
1438static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1439 ulong now)
6b7c5b94 1440{
2632bafd
SP
1441 aic->rx_pkts_prev = rx_pkts;
1442 aic->tx_reqs_prev = tx_pkts;
1443 aic->jiffies = now;
1444}
ac124ff9 1445
2632bafd
SP
/* Adaptive interrupt coalescing: recompute each event queue's delay
 * (EQD) from the RX+TX packet rate since the last snapshot and push all
 * changed delays to the FW in one be_cmd_modify_eqd() call.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: force the static (ethtool) delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read per-queue counters under the u64 stats seqlock */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));


		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined RX+TX packets-per-second over the interval */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;	/* below threshold: no coalescing */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Only queue a FW update when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1512
/* Account one RX completion in the per-queue stats, under the u64 stats
 * seqlock so 64-bit counters read consistently on 32-bit hosts.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1528
2e588f84 1529static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1530{
19fad86f 1531 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1532 * Also ignore ipcksm for ipv6 pkts
1533 */
2e588f84 1534 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1535 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1536}
1537
/* Pop the next RX fragment's page_info from the RX queue tail.
 *
 * Pages are DMA-mapped in big_page_size chunks shared by several frags:
 * only the last frag of a page unmaps it; earlier frags just sync their
 * rx_frag_size slice for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1563
/* Throwaway the data in the Rx completion: release every fragment page
 * referenced by @rxcp and clear its page_info bookkeeping.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
1577
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets (<= BE_HDR_LEN) are fully copied into the skb's linear
 * area; otherwise only the header is copied and the fragment pages are
 * attached as paged data.  Frags sharing a physical page are coalesced
 * into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; attach the rest as a frag */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page reference now owned by the skb */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1652
5be93b9a 1653/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1654static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1655 struct be_rx_compl_info *rxcp)
6b7c5b94 1656{
10ef9ab4 1657 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1658 struct net_device *netdev = adapter->netdev;
6b7c5b94 1659 struct sk_buff *skb;
89420424 1660
bb349bb4 1661 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1662 if (unlikely(!skb)) {
ac124ff9 1663 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1664 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1665 return;
1666 }
1667
10ef9ab4 1668 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1669
6332c8d3 1670 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1671 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1672 else
1673 skb_checksum_none_assert(skb);
6b7c5b94 1674
6332c8d3 1675 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1676 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1677 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1678 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1679
1680 skb->encapsulation = rxcp->tunneled;
6384a4d0 1681 skb_mark_napi_id(skb, napi);
6b7c5b94 1682
343e43c0 1683 if (rxcp->vlanf)
86a9bad3 1684 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1685
1686 netif_receive_skb(skb);
6b7c5b94
SP
1687}
1688
5be93b9a 1689/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1690static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1691 struct napi_struct *napi,
1692 struct be_rx_compl_info *rxcp)
6b7c5b94 1693{
10ef9ab4 1694 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1695 struct be_rx_page_info *page_info;
5be93b9a 1696 struct sk_buff *skb = NULL;
2e588f84
SP
1697 u16 remaining, curr_frag_len;
1698 u16 i, j;
3968fa1e 1699
10ef9ab4 1700 skb = napi_get_frags(napi);
5be93b9a 1701 if (!skb) {
10ef9ab4 1702 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1703 return;
1704 }
1705
2e588f84
SP
1706 remaining = rxcp->pkt_size;
1707 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1708 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1709
1710 curr_frag_len = min(remaining, rx_frag_size);
1711
bd46cb6c
AK
1712 /* Coalesce all frags from the same physical page in one slot */
1713 if (i == 0 || page_info->page_offset == 0) {
1714 /* First frag or Fresh page */
1715 j++;
b061b39e 1716 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1717 skb_shinfo(skb)->frags[j].page_offset =
1718 page_info->page_offset;
9e903e08 1719 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1720 } else {
1721 put_page(page_info->page);
1722 }
9e903e08 1723 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1724 skb->truesize += rx_frag_size;
bd46cb6c 1725 remaining -= curr_frag_len;
6b7c5b94
SP
1726 memset(page_info, 0, sizeof(*page_info));
1727 }
bd46cb6c 1728 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1729
5be93b9a 1730 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1731 skb->len = rxcp->pkt_size;
1732 skb->data_len = rxcp->pkt_size;
5be93b9a 1733 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1734 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1735 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1736 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142
SP
1737
1738 skb->encapsulation = rxcp->tunneled;
6384a4d0 1739 skb_mark_napi_id(skb, napi);
5be93b9a 1740
343e43c0 1741 if (rxcp->vlanf)
86a9bad3 1742 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1743
10ef9ab4 1744 napi_gro_frags(napi);
2e588f84
SP
1745}
1746
10ef9ab4
SP
/* Decode a v1-format RX completion descriptor into the driver's
 * chip-independent be_rx_compl_info.  VLAN fields are decoded only when
 * the vlanf bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
	/* v1-only: tunneled (e.g. encapsulated) packet indication */
	rxcp->tunneled =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
}
1778
10ef9ab4
SP
/* Decode a v0 (legacy) HW RX completion entry into the driver's
 * software be_rx_compl_info representation.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	/* vlan fields are only valid when the vlan-present flag is set */
	if (rxcp->vlanf) {
		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	/* ip_frag exists only in the v0 completion format */
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}
1810
/* Returns the parsed RX completion at the CQ tail, or NULL when no new
 * completion is pending. Consumes the CQ entry (valid bit cleared,
 * tail advanced) and fixes up vlan/csum fields for the host.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the compl body is not read before the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW cannot validate L4 csum of IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Strip the pvid tag unless it was explicitly configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1855
1829b086 1856static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1857{
6b7c5b94 1858 u32 order = get_order(size);
1829b086 1859
6b7c5b94 1860 if (order > 0)
1829b086
ED
1861 gfp |= __GFP_COMP;
1862 return alloc_pages(gfp, order);
6b7c5b94
SP
1863}
1864
1865/*
1866 * Allocate a page, split it to fragments of size rx_frag_size and post as
1867 * receive buffers to BE
1868 */
1829b086 1869static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1870{
3abcdeda 1871 struct be_adapter *adapter = rxo->adapter;
26d92f92 1872 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1873 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1874 struct page *pagep = NULL;
ba42fad0 1875 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1876 struct be_eth_rx_d *rxd;
1877 u64 page_dmaaddr = 0, frag_dmaaddr;
1878 u32 posted, page_offset = 0;
1879
3abcdeda 1880 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1881 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1882 if (!pagep) {
1829b086 1883 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1884 if (unlikely(!pagep)) {
ac124ff9 1885 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1886 break;
1887 }
ba42fad0
IV
1888 page_dmaaddr = dma_map_page(dev, pagep, 0,
1889 adapter->big_page_size,
2b7bcebf 1890 DMA_FROM_DEVICE);
ba42fad0
IV
1891 if (dma_mapping_error(dev, page_dmaaddr)) {
1892 put_page(pagep);
1893 pagep = NULL;
1894 rx_stats(rxo)->rx_post_fail++;
1895 break;
1896 }
e50287be 1897 page_offset = 0;
6b7c5b94
SP
1898 } else {
1899 get_page(pagep);
e50287be 1900 page_offset += rx_frag_size;
6b7c5b94 1901 }
e50287be 1902 page_info->page_offset = page_offset;
6b7c5b94 1903 page_info->page = pagep;
6b7c5b94
SP
1904
1905 rxd = queue_head_node(rxq);
e50287be 1906 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
1907 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1908 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1909
1910 /* Any space left in the current big page for another frag? */
1911 if ((page_offset + rx_frag_size + rx_frag_size) >
1912 adapter->big_page_size) {
1913 pagep = NULL;
e50287be
SP
1914 page_info->last_frag = true;
1915 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1916 } else {
1917 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 1918 }
26d92f92
SP
1919
1920 prev_page_info = page_info;
1921 queue_head_inc(rxq);
10ef9ab4 1922 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 1923 }
e50287be
SP
1924
1925 /* Mark the last frag of a page when we break out of the above loop
1926 * with no more slots available in the RXQ
1927 */
1928 if (pagep) {
1929 prev_page_info->last_frag = true;
1930 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1931 }
6b7c5b94
SP
1932
1933 if (posted) {
6b7c5b94 1934 atomic_add(posted, &rxq->used);
6384a4d0
SP
1935 if (rxo->rx_post_starved)
1936 rxo->rx_post_starved = false;
8788fdc2 1937 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1938 } else if (atomic_read(&rxq->used) == 0) {
1939 /* Let be_worker replenish when memory is available */
3abcdeda 1940 rxo->rx_post_starved = true;
6b7c5b94 1941 }
6b7c5b94
SP
1942}
1943
5fb379ee 1944static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1945{
6b7c5b94
SP
1946 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1947
1948 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1949 return NULL;
1950
f3eb62d2 1951 rmb();
6b7c5b94
SP
1952 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1953
1954 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1955
1956 queue_tail_inc(tx_cq);
1957 return txcp;
1958}
1959
3c8def97 1960static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 1961 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1962{
3c8def97 1963 struct be_queue_info *txq = &txo->q;
a73b796e 1964 struct be_eth_wrb *wrb;
3c8def97 1965 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1966 struct sk_buff *sent_skb;
ec43b1a6
SP
1967 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1968 bool unmap_skb_hdr = true;
6b7c5b94 1969
ec43b1a6 1970 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1971 BUG_ON(!sent_skb);
ec43b1a6
SP
1972 sent_skbs[txq->tail] = NULL;
1973
1974 /* skip header wrb */
a73b796e 1975 queue_tail_inc(txq);
6b7c5b94 1976
ec43b1a6 1977 do {
6b7c5b94 1978 cur_index = txq->tail;
a73b796e 1979 wrb = queue_tail_node(txq);
2b7bcebf
IV
1980 unmap_tx_frag(&adapter->pdev->dev, wrb,
1981 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1982 unmap_skb_hdr = false;
1983
6b7c5b94
SP
1984 num_wrbs++;
1985 queue_tail_inc(txq);
ec43b1a6 1986 } while (cur_index != last_index);
6b7c5b94 1987
d8ec2c02 1988 dev_kfree_skb_any(sent_skb);
4d586b82 1989 return num_wrbs;
6b7c5b94
SP
1990}
1991
10ef9ab4
SP
1992/* Return the number of events in the event queue */
1993static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1994{
10ef9ab4
SP
1995 struct be_eq_entry *eqe;
1996 int num = 0;
859b1e4e 1997
10ef9ab4
SP
1998 do {
1999 eqe = queue_tail_node(&eqo->q);
2000 if (eqe->evt == 0)
2001 break;
859b1e4e 2002
10ef9ab4
SP
2003 rmb();
2004 eqe->evt = 0;
2005 num++;
2006 queue_tail_inc(&eqo->q);
2007 } while (true);
2008
2009 return num;
859b1e4e
SP
2010}
2011
10ef9ab4
SP
2012/* Leaves the EQ is disarmed state */
2013static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2014{
10ef9ab4 2015 int num = events_get(eqo);
859b1e4e 2016
10ef9ab4 2017 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2018}
2019
10ef9ab4 2020static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2021{
2022 struct be_rx_page_info *page_info;
3abcdeda
SP
2023 struct be_queue_info *rxq = &rxo->q;
2024 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2025 struct be_rx_compl_info *rxcp;
d23e946c
SP
2026 struct be_adapter *adapter = rxo->adapter;
2027 int flush_wait = 0;
6b7c5b94 2028
d23e946c
SP
2029 /* Consume pending rx completions.
2030 * Wait for the flush completion (identified by zero num_rcvd)
2031 * to arrive. Notify CQ even when there are no more CQ entries
2032 * for HW to flush partially coalesced CQ entries.
2033 * In Lancer, there is no need to wait for flush compl.
2034 */
2035 for (;;) {
2036 rxcp = be_rx_compl_get(rxo);
ddf1169f 2037 if (!rxcp) {
d23e946c
SP
2038 if (lancer_chip(adapter))
2039 break;
2040
2041 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2042 dev_warn(&adapter->pdev->dev,
2043 "did not receive flush compl\n");
2044 break;
2045 }
2046 be_cq_notify(adapter, rx_cq->id, true, 0);
2047 mdelay(1);
2048 } else {
2049 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2050 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2051 if (rxcp->num_rcvd == 0)
2052 break;
2053 }
6b7c5b94
SP
2054 }
2055
d23e946c
SP
2056 /* After cleanup, leave the CQ in unarmed state */
2057 be_cq_notify(adapter, rx_cq->id, false, 0);
2058
2059 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2060 while (atomic_read(&rxq->used) > 0) {
2061 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2062 put_page(page_info->page);
2063 memset(page_info, 0, sizeof(*page_info));
2064 }
2065 BUG_ON(atomic_read(&rxq->used));
482c9e79 2066 rxq->tail = rxq->head = 0;
6b7c5b94
SP
2067}
2068
0ae57bb3 2069static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2070{
0ae57bb3
SP
2071 struct be_tx_obj *txo;
2072 struct be_queue_info *txq;
a8e9179a 2073 struct be_eth_tx_compl *txcp;
4d586b82 2074 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
2075 struct sk_buff *sent_skb;
2076 bool dummy_wrb;
0ae57bb3 2077 int i, pending_txqs;
a8e9179a 2078
1a3d0717 2079 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2080 do {
0ae57bb3
SP
2081 pending_txqs = adapter->num_tx_qs;
2082
2083 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2084 cmpl = 0;
2085 num_wrbs = 0;
0ae57bb3
SP
2086 txq = &txo->q;
2087 while ((txcp = be_tx_compl_get(&txo->cq))) {
2088 end_idx =
2089 AMAP_GET_BITS(struct amap_eth_tx_compl,
2090 wrb_index, txcp);
2091 num_wrbs += be_tx_compl_process(adapter, txo,
2092 end_idx);
2093 cmpl++;
2094 }
2095 if (cmpl) {
2096 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2097 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2098 timeo = 0;
0ae57bb3
SP
2099 }
2100 if (atomic_read(&txq->used) == 0)
2101 pending_txqs--;
a8e9179a
SP
2102 }
2103
1a3d0717 2104 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2105 break;
2106
2107 mdelay(1);
2108 } while (true);
2109
0ae57bb3
SP
2110 for_all_tx_queues(adapter, txo, i) {
2111 txq = &txo->q;
2112 if (atomic_read(&txq->used))
2113 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2114 atomic_read(&txq->used));
2115
2116 /* free posted tx for which compls will never arrive */
2117 while (atomic_read(&txq->used)) {
2118 sent_skb = txo->sent_skb_list[txq->tail];
2119 end_idx = txq->tail;
2120 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2121 &dummy_wrb);
2122 index_adv(&end_idx, num_wrbs - 1, txq->len);
2123 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2124 atomic_sub(num_wrbs, &txq->used);
2125 }
b03388d6 2126 }
6b7c5b94
SP
2127}
2128
10ef9ab4
SP
/* Drains, destroys and frees every event queue along with its NAPI context */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2144
/* Allocates and creates the event queues, one per IRQ vector (capped by
 * the configured queue count), and registers their NAPI contexts.
 * Returns 0 on success or a negative status.
 * NOTE(review): on mid-loop failure, partially created EQs are presumably
 * released by the caller via be_evt_queues_destroy() — confirm.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2178
5fb379ee
SP
2179static void be_mcc_queues_destroy(struct be_adapter *adapter)
2180{
2181 struct be_queue_info *q;
5fb379ee 2182
8788fdc2 2183 q = &adapter->mcc_obj.q;
5fb379ee 2184 if (q->created)
8788fdc2 2185 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2186 be_queue_free(adapter, q);
2187
8788fdc2 2188 q = &adapter->mcc_obj.cq;
5fb379ee 2189 if (q->created)
8788fdc2 2190 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2191 be_queue_free(adapter, q);
2192}
2193
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind, in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2226
6b7c5b94
SP
2227static void be_tx_queues_destroy(struct be_adapter *adapter)
2228{
2229 struct be_queue_info *q;
3c8def97
SP
2230 struct be_tx_obj *txo;
2231 u8 i;
6b7c5b94 2232
3c8def97
SP
2233 for_all_tx_queues(adapter, txo, i) {
2234 q = &txo->q;
2235 if (q->created)
2236 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2237 be_queue_free(adapter, q);
6b7c5b94 2238
3c8def97
SP
2239 q = &txo->cq;
2240 if (q->created)
2241 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2242 be_queue_free(adapter, q);
2243 }
6b7c5b94
SP
2244}
2245
7707133c 2246static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2247{
10ef9ab4 2248 struct be_queue_info *cq, *eq;
3c8def97 2249 struct be_tx_obj *txo;
92bf14ab 2250 int status, i;
6b7c5b94 2251
92bf14ab 2252 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2253
10ef9ab4
SP
2254 for_all_tx_queues(adapter, txo, i) {
2255 cq = &txo->cq;
2256 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2257 sizeof(struct be_eth_tx_compl));
2258 if (status)
2259 return status;
3c8def97 2260
827da44c
JS
2261 u64_stats_init(&txo->stats.sync);
2262 u64_stats_init(&txo->stats.sync_compl);
2263
10ef9ab4
SP
2264 /* If num_evt_qs is less than num_tx_qs, then more than
2265 * one txq share an eq
2266 */
2267 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2268 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2269 if (status)
2270 return status;
6b7c5b94 2271
10ef9ab4
SP
2272 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2273 sizeof(struct be_eth_wrb));
2274 if (status)
2275 return status;
6b7c5b94 2276
94d73aaa 2277 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2278 if (status)
2279 return status;
3c8def97 2280 }
6b7c5b94 2281
d379142b
SP
2282 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2283 adapter->num_tx_qs);
10ef9ab4 2284 return 0;
6b7c5b94
SP
2285}
2286
10ef9ab4 2287static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2288{
2289 struct be_queue_info *q;
3abcdeda
SP
2290 struct be_rx_obj *rxo;
2291 int i;
2292
2293 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2294 q = &rxo->cq;
2295 if (q->created)
2296 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2297 be_queue_free(adapter, q);
ac6a0c4a
SP
2298 }
2299}
2300
10ef9ab4 2301static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2302{
10ef9ab4 2303 struct be_queue_info *eq, *cq;
3abcdeda
SP
2304 struct be_rx_obj *rxo;
2305 int rc, i;
6b7c5b94 2306
92bf14ab
SP
2307 /* We can create as many RSS rings as there are EQs. */
2308 adapter->num_rx_qs = adapter->num_evt_qs;
2309
2310 /* We'll use RSS only if atleast 2 RSS rings are supported.
2311 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2312 */
92bf14ab
SP
2313 if (adapter->num_rx_qs > 1)
2314 adapter->num_rx_qs++;
2315
6b7c5b94 2316 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2317 for_all_rx_queues(adapter, rxo, i) {
2318 rxo->adapter = adapter;
3abcdeda
SP
2319 cq = &rxo->cq;
2320 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2321 sizeof(struct be_eth_rx_compl));
3abcdeda 2322 if (rc)
10ef9ab4 2323 return rc;
3abcdeda 2324
827da44c 2325 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2326 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2327 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2328 if (rc)
10ef9ab4 2329 return rc;
3abcdeda 2330 }
6b7c5b94 2331
d379142b
SP
2332 dev_info(&adapter->pdev->dev,
2333 "created %d RSS queue(s) and 1 default RX queue\n",
2334 adapter->num_rx_qs - 1);
10ef9ab4 2335 return 0;
b628bde2
SP
2336}
2337
6b7c5b94
SP
/* Legacy INTx interrupt handler: schedules NAPI for the first EQ */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2369
10ef9ab4 2370static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2371{
10ef9ab4 2372 struct be_eq_obj *eqo = dev;
6b7c5b94 2373
0b545a62
SP
2374 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2375 napi_schedule(&eqo->napi);
6b7c5b94
SP
2376 return IRQ_HANDLED;
2377}
2378
2e588f84 2379static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2380{
e38b1706 2381 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2382}
2383
10ef9ab4 2384static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2385 int budget, int polling)
6b7c5b94 2386{
3abcdeda
SP
2387 struct be_adapter *adapter = rxo->adapter;
2388 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2389 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2390 u32 work_done;
2391
2392 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2393 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2394 if (!rxcp)
2395 break;
2396
12004ae9
SP
2397 /* Is it a flush compl that has no data */
2398 if (unlikely(rxcp->num_rcvd == 0))
2399 goto loop_continue;
2400
2401 /* Discard compl with partial DMA Lancer B0 */
2402 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2403 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2404 goto loop_continue;
2405 }
2406
2407 /* On BE drop pkts that arrive due to imperfect filtering in
2408 * promiscuous mode on some skews
2409 */
2410 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2411 !lancer_chip(adapter))) {
10ef9ab4 2412 be_rx_compl_discard(rxo, rxcp);
12004ae9 2413 goto loop_continue;
64642811 2414 }
009dd872 2415
6384a4d0
SP
2416 /* Don't do gro when we're busy_polling */
2417 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2418 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2419 else
6384a4d0
SP
2420 be_rx_compl_process(rxo, napi, rxcp);
2421
12004ae9 2422loop_continue:
2e588f84 2423 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2424 }
2425
10ef9ab4
SP
2426 if (work_done) {
2427 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2428
6384a4d0
SP
2429 /* When an rx-obj gets into post_starved state, just
2430 * let be_worker do the posting.
2431 */
2432 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2433 !rxo->rx_post_starved)
10ef9ab4 2434 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2435 }
10ef9ab4 2436
6b7c5b94
SP
2437 return work_done;
2438}
2439
10ef9ab4
SP
/* Reclaims up to @budget TX completions of the given TXQ.
 * Returns true when the budget was not exhausted (queue fully serviced).
 */
static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
						AMAP_GET_BITS(struct
							      amap_eth_tx_compl,
							      wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
6b7c5b94 2473
68d7bdcb 2474int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2475{
2476 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2477 struct be_adapter *adapter = eqo->adapter;
0b545a62 2478 int max_work = 0, work, i, num_evts;
6384a4d0 2479 struct be_rx_obj *rxo;
10ef9ab4 2480 bool tx_done;
f31e50a8 2481
0b545a62
SP
2482 num_evts = events_get(eqo);
2483
10ef9ab4
SP
2484 /* Process all TXQs serviced by this EQ */
2485 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2486 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2487 eqo->tx_budget, i);
2488 if (!tx_done)
2489 max_work = budget;
f31e50a8
SP
2490 }
2491
6384a4d0
SP
2492 if (be_lock_napi(eqo)) {
2493 /* This loop will iterate twice for EQ0 in which
2494 * completions of the last RXQ (default one) are also processed
2495 * For other EQs the loop iterates only once
2496 */
2497 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2498 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2499 max_work = max(work, max_work);
2500 }
2501 be_unlock_napi(eqo);
2502 } else {
2503 max_work = budget;
10ef9ab4 2504 }
6b7c5b94 2505
10ef9ab4
SP
2506 if (is_mcc_eqo(eqo))
2507 be_process_mcc(adapter);
93c86700 2508
10ef9ab4
SP
2509 if (max_work < budget) {
2510 napi_complete(napi);
0b545a62 2511 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2512 } else {
2513 /* As we'll continue in polling mode, count and clear events */
0b545a62 2514 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2515 }
10ef9ab4 2516 return max_work;
6b7c5b94
SP
2517}
2518
6384a4d0
SP
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll entry point: services the RX rings of this EQ from process
 * context. Returns LL_FLUSH_BUSY when NAPI currently owns the rings.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2540
f67ef7ba 2541void be_detect_error(struct be_adapter *adapter)
7c185276 2542{
e1cfb67a
PR
2543 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2544 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2545 u32 i;
eb0eecc1
SK
2546 bool error_detected = false;
2547 struct device *dev = &adapter->pdev->dev;
2548 struct net_device *netdev = adapter->netdev;
7c185276 2549
d23e946c 2550 if (be_hw_error(adapter))
72f02485
SP
2551 return;
2552
e1cfb67a
PR
2553 if (lancer_chip(adapter)) {
2554 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2555 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2556 sliport_err1 = ioread32(adapter->db +
748b539a 2557 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2558 sliport_err2 = ioread32(adapter->db +
748b539a 2559 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2560 adapter->hw_error = true;
2561 /* Do not log error messages if its a FW reset */
2562 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2563 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2564 dev_info(dev, "Firmware update in progress\n");
2565 } else {
2566 error_detected = true;
2567 dev_err(dev, "Error detected in the card\n");
2568 dev_err(dev, "ERR: sliport status 0x%x\n",
2569 sliport_status);
2570 dev_err(dev, "ERR: sliport error1 0x%x\n",
2571 sliport_err1);
2572 dev_err(dev, "ERR: sliport error2 0x%x\n",
2573 sliport_err2);
2574 }
e1cfb67a
PR
2575 }
2576 } else {
2577 pci_read_config_dword(adapter->pdev,
748b539a 2578 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2579 pci_read_config_dword(adapter->pdev,
748b539a 2580 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2581 pci_read_config_dword(adapter->pdev,
748b539a 2582 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2583 pci_read_config_dword(adapter->pdev,
748b539a 2584 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2585
f67ef7ba
PR
2586 ue_lo = (ue_lo & ~ue_lo_mask);
2587 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2588
eb0eecc1
SK
2589 /* On certain platforms BE hardware can indicate spurious UEs.
2590 * Allow HW to stop working completely in case of a real UE.
2591 * Hence not setting the hw_error for UE detection.
2592 */
f67ef7ba 2593
eb0eecc1
SK
2594 if (ue_lo || ue_hi) {
2595 error_detected = true;
2596 dev_err(dev,
2597 "Unrecoverable Error detected in the adapter");
2598 dev_err(dev, "Please reboot server to recover");
2599 if (skyhawk_chip(adapter))
2600 adapter->hw_error = true;
2601 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2602 if (ue_lo & 1)
2603 dev_err(dev, "UE: %s bit set\n",
2604 ue_status_low_desc[i]);
2605 }
2606 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2607 if (ue_hi & 1)
2608 dev_err(dev, "UE: %s bit set\n",
2609 ue_status_hi_desc[i]);
2610 }
7c185276
AK
2611 }
2612 }
eb0eecc1
SK
2613 if (error_detected)
2614 netif_carrier_off(netdev);
7c185276
AK
2615}
2616
8d56ff11
SP
2617static void be_msix_disable(struct be_adapter *adapter)
2618{
ac6a0c4a 2619 if (msix_enabled(adapter)) {
8d56ff11 2620 pci_disable_msix(adapter->pdev);
ac6a0c4a 2621 adapter->num_msix_vec = 0;
68d7bdcb 2622 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2623 }
2624}
2625
c2bba3df 2626static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2627{
7dc4c064 2628 int i, num_vec;
d379142b 2629 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2630
92bf14ab
SP
2631 /* If RoCE is supported, program the max number of NIC vectors that
2632 * may be configured via set-channels, along with vectors needed for
2633 * RoCe. Else, just program the number we'll use initially.
2634 */
2635 if (be_roce_supported(adapter))
2636 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2637 2 * num_online_cpus());
2638 else
2639 num_vec = adapter->cfg_num_qs;
3abcdeda 2640
ac6a0c4a 2641 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2642 adapter->msix_entries[i].entry = i;
2643
7dc4c064
AG
2644 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2645 MIN_MSIX_VECTORS, num_vec);
2646 if (num_vec < 0)
2647 goto fail;
92bf14ab 2648
92bf14ab
SP
2649 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2650 adapter->num_msix_roce_vec = num_vec / 2;
2651 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2652 adapter->num_msix_roce_vec);
2653 }
2654
2655 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2656
2657 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2658 adapter->num_msix_vec);
c2bba3df 2659 return 0;
7dc4c064
AG
2660
2661fail:
2662 dev_warn(dev, "MSIx enable failed\n");
2663
2664 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2665 if (!be_physfn(adapter))
2666 return num_vec;
2667 return 0;
6b7c5b94
SP
2668}
2669
fe6d2a38 2670static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2671 struct be_eq_obj *eqo)
b628bde2 2672{
f2f781a7 2673 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2674}
6b7c5b94 2675
b628bde2
SP
/* Requests one MSI-x IRQ per event queue. On failure, frees the vectors
 * registered so far and disables MSI-x. Returns 0 or a negative status.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind the IRQs registered before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2699
/* Registers the adapter's interrupt handler(s): MSI-x when enabled,
 * otherwise legacy INTx (PFs only). Returns 0 or a negative status.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
2727
2728static void be_irq_unregister(struct be_adapter *adapter)
2729{
2730 struct net_device *netdev = adapter->netdev;
10ef9ab4 2731 struct be_eq_obj *eqo;
3abcdeda 2732 int i;
6b7c5b94
SP
2733
2734 if (!adapter->isr_registered)
2735 return;
2736
2737 /* INTx */
ac6a0c4a 2738 if (!msix_enabled(adapter)) {
e49cc34f 2739 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2740 goto done;
2741 }
2742
2743 /* MSIx */
10ef9ab4
SP
2744 for_all_evt_queues(adapter, eqo, i)
2745 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2746
6b7c5b94
SP
2747done:
2748 adapter->isr_registered = false;
6b7c5b94
SP
2749}
2750
10ef9ab4 2751static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2752{
2753 struct be_queue_info *q;
2754 struct be_rx_obj *rxo;
2755 int i;
2756
2757 for_all_rx_queues(adapter, rxo, i) {
2758 q = &rxo->q;
2759 if (q->created) {
2760 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2761 be_rx_cq_clean(rxo);
482c9e79 2762 }
10ef9ab4 2763 be_queue_free(adapter, q);
482c9e79
SP
2764 }
2765}
2766
889cd4b2
SP
/* ndo_stop: quiesce the interface.
 *
 * Teardown order matters: NAPI/busy-poll first, then MCC async events,
 * then TX drain, RX queue destroy, uc-mac cleanup, and finally IRQ
 * synchronization + unregistration. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the secondary uc-macs; index 0 (the primary MAC) is kept */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no ISR is still running for any EQ before cleaning it */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2816
10ef9ab4 2817static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2818{
2819 struct be_rx_obj *rxo;
e9008ee9 2820 int rc, i, j;
e2557877
VD
2821 u8 rss_hkey[RSS_HASH_KEY_LEN];
2822 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
2823
2824 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2825 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2826 sizeof(struct be_eth_rx_d));
2827 if (rc)
2828 return rc;
2829 }
2830
2831 /* The FW would like the default RXQ to be created first */
2832 rxo = default_rxo(adapter);
2833 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2834 adapter->if_handle, false, &rxo->rss_id);
2835 if (rc)
2836 return rc;
2837
2838 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2839 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2840 rx_frag_size, adapter->if_handle,
2841 true, &rxo->rss_id);
482c9e79
SP
2842 if (rc)
2843 return rc;
2844 }
2845
2846 if (be_multi_rxq(adapter)) {
e2557877
VD
2847 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2848 j += adapter->num_rx_qs - 1) {
e9008ee9 2849 for_all_rss_queues(adapter, rxo, i) {
e2557877 2850 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 2851 break;
e2557877
VD
2852 rss->rsstable[j + i] = rxo->rss_id;
2853 rss->rss_queue[j + i] = i;
e9008ee9
PR
2854 }
2855 }
e2557877
VD
2856 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2857 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
2858
2859 if (!BEx_chip(adapter))
e2557877
VD
2860 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2861 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
2862 } else {
2863 /* Disable RSS, if only default RX Q is created */
e2557877 2864 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2865 }
594ad54a 2866
e2557877 2867 get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
748b539a 2868 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
e2557877 2869 128, rss_hkey);
da1388d6 2870 if (rc) {
e2557877 2871 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 2872 return rc;
482c9e79
SP
2873 }
2874
e2557877
VD
2875 memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2876
482c9e79 2877 /* First time posting */
10ef9ab4 2878 for_all_rx_queues(adapter, rxo, i)
482c9e79 2879 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2880 return 0;
2881}
2882
6b7c5b94
SP
/* ndo_open: bring the interface up.
 *
 * Creates the RX queues, registers IRQs, arms all CQs/EQs, enables NAPI
 * and async MCC events, then starts the TX queues. On any failure the
 * partially-initialized state is unwound via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all completion queues before enabling interrupts */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state; a failed query is non-fatal */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2932
71d8d1b5
AK
/* Enable or disable Wake-on-LAN (magic packet).
 *
 * @enable: true programs the netdev MAC as the magic-WoL trigger and arms
 *          PCI wake for D3hot/D3cold; false programs an all-zero MAC and
 *          disarms PCI wake.
 * Returns 0 on success or a negative error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* Zero MAC is used to clear the magic-WoL filter on disable */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		/* PM-control config write must succeed before WoL is armed */
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2972
6d87f5c3
AK
2973/*
2974 * Generate a seed MAC address from the PF MAC Address using jhash.
2975 * MAC Address for VFs are assigned incrementally starting from the seed.
2976 * These addresses are programmed in the ASIC by the PF and the VF driver
2977 * queries for the MAC address during its probe.
2978 */
/* Program a MAC address on each VF's interface, derived incrementally
 * from a generated seed MAC (see comment above on the jhash scheme).
 *
 * A per-VF failure is logged but does not stop the loop; the last status
 * observed is returned.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BE2/BE3 use the pmac add/del API; newer chips set the
		 * MAC directly on the VF function.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address; only the last octet is
		 * bumped (wraps at 256 VFs, which exceeds HW limits anyway).
		 */
		mac[5] += 1;
	}
	return status;
}
3008
4c876616
SP
/* Read back the currently-active MAC of every VF from the FW and cache it
 * in vf_cfg. Used when VFs were already enabled before this PF probe, so
 * their MACs must be discovered rather than assigned. Stops at the first
 * query failure and returns that error.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3025
/* Undo be_vf_setup(): disable SR-IOV and destroy the per-VF interfaces.
 *
 * If any VF is still assigned to a VM, SR-IOV is deliberately left
 * enabled (disabling it would yank the device out from under the guest);
 * only the host-side bookkeeping is freed in that case.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Remove the MAC the PF programmed for this VF */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->vf_cfg = NULL;
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3054
7707133c
SP
/* Destroy all NIC queues in the reverse order of their creation:
 * MCC first, then RX CQs, TX queues, and finally the event queues that
 * everything else is attached to.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3062
68d7bdcb 3063static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3064{
191eb756
SP
3065 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3066 cancel_delayed_work_sync(&adapter->work);
3067 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3068 }
68d7bdcb
SP
3069}
3070
b05004ad 3071static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb
SP
3072{
3073 int i;
3074
b05004ad
SK
3075 if (adapter->pmac_id) {
3076 for (i = 0; i < (adapter->uc_macs + 1); i++)
3077 be_cmd_pmac_del(adapter, adapter->if_handle,
3078 adapter->pmac_id[i], 0);
3079 adapter->uc_macs = 0;
3080
3081 kfree(adapter->pmac_id);
3082 adapter->pmac_id = NULL;
3083 }
3084}
3085
#ifdef CONFIG_BE2NET_VXLAN
/* Turn off VxLAN TX/RX offloads: revert the interface from tunnel mode,
 * clear the FW's notion of the VxLAN UDP port, and reset the cached
 * state. Idempotent — each step is guarded by the corresponding flag.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
c9c47142 3100
b05004ad
SK
/* Full teardown of the adapter's software/firmware state: worker, VFs,
 * VxLAN offloads, MACs, the interface, all queues, and MSI-X.
 * Counterpart of be_setup(). Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3129
4c876616 3130static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3131{
92bf14ab 3132 struct be_resources res = {0};
4c876616
SP
3133 struct be_vf_cfg *vf_cfg;
3134 u32 cap_flags, en_flags, vf;
922bbe88 3135 int status = 0;
abb93951 3136
4c876616
SP
3137 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3138 BE_IF_FLAGS_MULTICAST;
abb93951 3139
4c876616 3140 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3141 if (!BE3_chip(adapter)) {
3142 status = be_cmd_get_profile_config(adapter, &res,
3143 vf + 1);
3144 if (!status)
3145 cap_flags = res.if_cap_flags;
3146 }
4c876616
SP
3147
3148 /* If a FW profile exists, then cap_flags are updated */
3149 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
748b539a
SP
3150 BE_IF_FLAGS_BROADCAST |
3151 BE_IF_FLAGS_MULTICAST);
3152 status =
3153 be_cmd_if_create(adapter, cap_flags, en_flags,
3154 &vf_cfg->if_handle, vf + 1);
4c876616
SP
3155 if (status)
3156 goto err;
3157 }
3158err:
3159 return status;
abb93951
PR
3160}
3161
/* Allocate the per-VF config array sized for adapter->num_vfs.
 *
 * Each entry's if_handle and pmac_id are marked -1 (invalid) so later
 * teardown can tell which VFs were actually configured.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3178
f9449ab7
SP
/* Configure SR-IOV virtual functions.
 *
 * Two entry conditions: VFs already enabled by a previous driver load
 * (old_vfs != 0 — discover their if-handles and MACs) or a fresh enable
 * (create interfaces, assign MACs, then pci_enable_sriov()). In both
 * cases each VF is granted filter-management privilege and, on fresh
 * enable, full bandwidth and auto link state. On failure everything is
 * unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs survive from a previous load: query existing state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3253
f93f160b
VV
3254/* Converting function_mode bits on BE3 to SH mc_type enums */
3255
3256static u8 be_convert_mc_type(u32 function_mode)
3257{
66064dbc 3258 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3259 return vNIC1;
66064dbc 3260 else if (function_mode & QNQ_MODE)
f93f160b
VV
3261 return FLEX10;
3262 else if (function_mode & VNIC_MODE)
3263 return vNIC2;
3264 else if (function_mode & UMC_ENABLED)
3265 return UMC;
3266 else
3267 return MC_NONE;
3268}
3269
92bf14ab
SP
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get the larger unicast-MAC budget */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
		res->max_tx_qs = 1;
	else
		res->max_tx_qs = BE3_MAX_TX_QS;

	/* RSS RX rings only for an RSS-capable, non-SRIOV PF */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
			BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the non-RSS default RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
			BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3327
30128031
SP
3328static void be_setup_init(struct be_adapter *adapter)
3329{
3330 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3331 adapter->phy.link_speed = -1;
30128031
SP
3332 adapter->if_handle = -1;
3333 adapter->be3_native = false;
3334 adapter->promiscuous = false;
f25b119c
PR
3335 if (be_physfn(adapter))
3336 adapter->cmd_privileges = MAX_PRIVILEGES;
3337 else
3338 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3339}
3340
bec84e6b
VV
/* Discover the SR-IOV resource pool and decide how many VFs to enable.
 *
 * The num_vfs module parameter is validated against the HW maximum; if
 * VFs are already enabled (e.g. from a previous driver load) their count
 * wins and num_vfs is ignored. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		/* Fall back to the PCI capability's TotalVFs */
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "device doesn't support SRIOV\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}
3385
/* Populate adapter->res with the per-function resource limits:
 * computed locally for BE2/BE3, queried from FW for Lancer/Skyhawk.
 * Returns 0 or the FW-query error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;

		dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
			 be_max_txqs(adapter), be_max_rxqs(adapter),
			 be_max_rss(adapter), be_max_eqs(adapter),
			 be_max_vfs(adapter));
		dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
			 be_max_uc(adapter), be_max_mc(adapter),
			 be_max_vlans(adapter));
	}

	return 0;
}
3422
d3d18312
SP
/* Query the SR-IOV pool and, where supported, ask FW to redistribute the
 * PF-pool resources across only the requested number of VFs so each VF
 * gets a bigger share. Failures are logged but non-fatal.
 */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}
3451
39f1d94d
SP
/* Query FW configuration, set up SR-IOV (non-BE2 PFs), read resource
 * limits, and allocate the pmac_id table sized for the uc-mac limit.
 * Returns 0, a FW error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Active-profile query is informational; its failure is ignored */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3485
95046b92
SP
3486static int be_mac_setup(struct be_adapter *adapter)
3487{
3488 u8 mac[ETH_ALEN];
3489 int status;
3490
3491 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3492 status = be_cmd_get_perm_mac(adapter, mac);
3493 if (status)
3494 return status;
3495
3496 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3497 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3498 } else {
3499 /* Maybe the HW was reset; dev_addr must be re-programmed */
3500 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3501 }
3502
2c7a9dc1
AK
3503 /* For BE3-R VFs, the PF programs the initial MAC address */
3504 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3505 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3506 &adapter->pmac_id[0], 0);
95046b92
SP
3507 return 0;
3508}
3509
68d7bdcb
SP
3510static void be_schedule_worker(struct be_adapter *adapter)
3511{
3512 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3513 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3514}
3515
7707133c 3516static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3517{
68d7bdcb 3518 struct net_device *netdev = adapter->netdev;
10ef9ab4 3519 int status;
ba343c77 3520
7707133c 3521 status = be_evt_queues_create(adapter);
abb93951
PR
3522 if (status)
3523 goto err;
73d540f2 3524
7707133c 3525 status = be_tx_qs_create(adapter);
c2bba3df
SK
3526 if (status)
3527 goto err;
10ef9ab4 3528
7707133c 3529 status = be_rx_cqs_create(adapter);
10ef9ab4 3530 if (status)
a54769f5 3531 goto err;
6b7c5b94 3532
7707133c 3533 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3534 if (status)
3535 goto err;
3536
68d7bdcb
SP
3537 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3538 if (status)
3539 goto err;
3540
3541 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3542 if (status)
3543 goto err;
3544
7707133c
SP
3545 return 0;
3546err:
3547 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3548 return status;
3549}
3550
68d7bdcb
SP
/* Re-create all queues after a resource/configuration change, restoring
 * the running state (close → rebuild → open) around the operation.
 * Returns 0 or the first error from re-setup/re-open.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3586
7707133c
SP
/* One-shot adapter initialization: FW config, MSI-X, interface creation,
 * queues, MAC, flow control, VFs, and the periodic worker.
 *
 * The sequence is order-sensitive (e.g. the interface must exist before
 * queues/MAC; queue-count updates need rtnl_lock). On any fatal failure
 * everything done so far is unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags this function is actually capable of */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);

	/* Old BE2 firmware has known IRQ problems; warn but continue */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	/* Re-assert our flow-control settings if FW reports different ones */
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failure is non-fatal for the PF itself */
	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
6b7c5b94 3668
66268739
IV
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: service all event queues with interrupts disabled
 * (netconsole/kgdboe path). Re-arms each EQ without clearing entries and
 * schedules its NAPI context to do the actual completion processing.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
	/* dropped the redundant bare "return;" at the end of this void fn */
}
#endif
3684
/* 32-byte cookie that marks the flash section directory in a UFI image;
 * split in two halves to match the on-disk layout (see get_fsec_info()).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 3686
306f1348
SP
3687static bool phy_flashing_required(struct be_adapter *adapter)
3688{
42f11cf2
AK
3689 return (adapter->phy.phy_type == TN_8022 &&
3690 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3691}
3692
c165541e
PR
3693static bool is_comp_in_ufi(struct be_adapter *adapter,
3694 struct flash_section_info *fsec, int type)
3695{
3696 int i = 0, img_type = 0;
3697 struct flash_section_info_g2 *fsec_g2 = NULL;
3698
ca34fe38 3699 if (BE2_chip(adapter))
c165541e
PR
3700 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3701
3702 for (i = 0; i < MAX_FLASH_COMP; i++) {
3703 if (fsec_g2)
3704 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3705 else
3706 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3707
3708 if (img_type == type)
3709 return true;
3710 }
3711 return false;
3712
3713}
3714
4188e7df 3715static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
3716 int header_size,
3717 const struct firmware *fw)
c165541e
PR
3718{
3719 struct flash_section_info *fsec = NULL;
3720 const u8 *p = fw->data;
3721
3722 p += header_size;
3723 while (p < (fw->data + fw->size)) {
3724 fsec = (struct flash_section_info *)p;
3725 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3726 return fsec;
3727 p += 32;
3728 }
3729 return NULL;
3730}
3731
96c9b2e4
VV
3732static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3733 u32 img_offset, u32 img_size, int hdr_size,
3734 u16 img_optype, bool *crc_match)
3735{
3736 u32 crc_offset;
3737 int status;
3738 u8 crc[4];
3739
3740 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3741 if (status)
3742 return status;
3743
3744 crc_offset = hdr_size + img_offset + img_size - 4;
3745
3746 /* Skip flashing, if crc of flashed region matches */
3747 if (!memcmp(crc, p + crc_offset, 4))
3748 *crc_match = true;
3749 else
3750 *crc_match = false;
3751
3752 return status;
3753}
3754
773a2d7c 3755static int be_flash(struct be_adapter *adapter, const u8 *img,
748b539a 3756 struct be_dma_mem *flash_cmd, int optype, int img_size)
773a2d7c 3757{
773a2d7c 3758 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4
VV
3759 u32 total_bytes, flash_op, num_bytes;
3760 int status;
773a2d7c
PR
3761
3762 total_bytes = img_size;
3763 while (total_bytes) {
3764 num_bytes = min_t(u32, 32*1024, total_bytes);
3765
3766 total_bytes -= num_bytes;
3767
3768 if (!total_bytes) {
3769 if (optype == OPTYPE_PHY_FW)
3770 flash_op = FLASHROM_OPER_PHY_FLASH;
3771 else
3772 flash_op = FLASHROM_OPER_FLASH;
3773 } else {
3774 if (optype == OPTYPE_PHY_FW)
3775 flash_op = FLASHROM_OPER_PHY_SAVE;
3776 else
3777 flash_op = FLASHROM_OPER_SAVE;
3778 }
3779
be716446 3780 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3781 img += num_bytes;
3782 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
748b539a 3783 flash_op, num_bytes);
4c60005f 3784 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
3785 optype == OPTYPE_PHY_FW)
3786 break;
3787 else if (status)
773a2d7c 3788 return status;
773a2d7c
PR
3789 }
3790 return 0;
3791}
3792
/* For BE2, BE3 and BE3-R */
/* Flash a UFI image by walking the fixed per-generation component table:
 * each component present in the UFI's section directory is flashed,
 * except where a version gate, PHY check, or matching redboot CRC says
 * it can be skipped.
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* gen3 layout: BE3/BE3-R component offsets and optypes */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* gen2 layout: BE2 component offsets and optypes */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW flashing is only supported from FW 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Redboot: skip when the flashed region's CRC already
		 * matches the image; a CRC-read failure is non-fatal.
		 */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds check: component must lie inside the FW file */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
3909
96c9b2e4
VV
3910static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3911{
3912 u32 img_type = le32_to_cpu(fsec_entry.type);
3913 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3914
3915 if (img_optype != 0xFFFF)
3916 return img_optype;
3917
3918 switch (img_type) {
3919 case IMAGE_FIRMWARE_iSCSI:
3920 img_optype = OPTYPE_ISCSI_ACTIVE;
3921 break;
3922 case IMAGE_BOOT_CODE:
3923 img_optype = OPTYPE_REDBOOT;
3924 break;
3925 case IMAGE_OPTION_ROM_ISCSI:
3926 img_optype = OPTYPE_BIOS;
3927 break;
3928 case IMAGE_OPTION_ROM_PXE:
3929 img_optype = OPTYPE_PXE_BIOS;
3930 break;
3931 case IMAGE_OPTION_ROM_FCoE:
3932 img_optype = OPTYPE_FCOE_BIOS;
3933 break;
3934 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3935 img_optype = OPTYPE_ISCSI_BACKUP;
3936 break;
3937 case IMAGE_NCSI:
3938 img_optype = OPTYPE_NCSI_FW;
3939 break;
3940 case IMAGE_FLASHISM_JUMPVECTOR:
3941 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3942 break;
3943 case IMAGE_FIRMWARE_PHY:
3944 img_optype = OPTYPE_SH_PHY_FW;
3945 break;
3946 case IMAGE_REDBOOT_DIR:
3947 img_optype = OPTYPE_REDBOOT_DIR;
3948 break;
3949 case IMAGE_REDBOOT_CONFIG:
3950 img_optype = OPTYPE_REDBOOT_CONFIG;
3951 break;
3952 case IMAGE_UFI_DIR:
3953 img_optype = OPTYPE_UFI_DIR;
3954 break;
3955 default:
3956 break;
3957 }
3958
3959 return img_optype;
3960}
3961
/* Flash a UFI image on Skyhawk adapters by walking the image's flash
 * section directory. Components whose on-flash CRC already matches are
 * skipped; errors specific to old-format FW images are tolerated.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	int status, i, filehdr_size;
	bool crc_match, old_fw_img;
	u16 img_optype;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* old images carry no explicit optype in the entry */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Skip entries whose optype cannot be determined */
		if (img_optype == 0xFFFF)
			continue;
		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, img_optype,
					    &crc_match);
		/* The current FW image on the card does not recognize the new
		 * FLASH op_type. The FW download is partially complete.
		 * Reboot the server now to enable FW image to recognize the
		 * new FLASH op_type. To complete the remaining process,
		 * download the same FW again after the reboot.
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Bounds check: component must lie inside the FW file */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4043
/* Download firmware to a Lancer adapter: stream the image to the "/prg"
 * flash object in 32KB chunks via write-object commands, commit with a
 * zero-length write, then reset the function if the new FW requires it.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The write-object command transfers whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* DMA buffer holds the command header plus one chunk of image data */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept fewer bytes than requested; advance by the
		 * amount actually written.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(&adapter->pdev->dev,
			 "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
4140
ca34fe38
SP
4141#define UFI_TYPE2 2
4142#define UFI_TYPE3 3
0ad3157e 4143#define UFI_TYPE3R 10
ca34fe38
SP
4144#define UFI_TYPE4 4
4145static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4146 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4147{
ddf1169f 4148 if (!fhdr)
773a2d7c
PR
4149 goto be_get_ufi_exit;
4150
ca34fe38
SP
4151 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4152 return UFI_TYPE4;
0ad3157e
VV
4153 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4154 if (fhdr->asic_type_rev == 0x10)
4155 return UFI_TYPE3R;
4156 else
4157 return UFI_TYPE3;
4158 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 4159 return UFI_TYPE2;
773a2d7c
PR
4160
4161be_get_ufi_exit:
4162 dev_err(&adapter->pdev->dev,
4163 "UFI and Interface are not compatible for flashing\n");
4164 return -1;
4165}
4166
485bf569
SN
/* Download firmware to a BE2/BE3/Skyhawk adapter: identify the UFI type,
 * then dispatch each image in the file to the generation-specific
 * flashing routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* One DMA buffer is reused for every write-flashrom command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only image id 1 carries flashable content */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Legacy BE2 images carry no per-image headers */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4235
4236int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4237{
4238 const struct firmware *fw;
4239 int status;
4240
4241 if (!netif_running(adapter->netdev)) {
4242 dev_err(&adapter->pdev->dev,
4243 "Firmware load not allowed (interface is down)\n");
940a3fcd 4244 return -ENETDOWN;
485bf569
SN
4245 }
4246
4247 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4248 if (status)
4249 goto fw_exit;
4250
4251 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4252
4253 if (lancer_chip(adapter))
4254 status = lancer_fw_download(adapter, fw);
4255 else
4256 status = be_fw_download(adapter, fw);
4257
eeb65ced 4258 if (!status)
e97e3cda 4259 be_cmd_get_fw_ver(adapter);
eeb65ced 4260
84517482
AK
4261fw_exit:
4262 release_firmware(fw);
4263 return status;
4264}
4265
748b539a 4266static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
a77dcb8c
AK
4267{
4268 struct be_adapter *adapter = netdev_priv(dev);
4269 struct nlattr *attr, *br_spec;
4270 int rem;
4271 int status = 0;
4272 u16 mode = 0;
4273
4274 if (!sriov_enabled(adapter))
4275 return -EOPNOTSUPP;
4276
4277 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4278
4279 nla_for_each_nested(attr, br_spec, rem) {
4280 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4281 continue;
4282
4283 mode = nla_get_u16(attr);
4284 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4285 return -EINVAL;
4286
4287 status = be_cmd_set_hsw_config(adapter, 0, 0,
4288 adapter->if_handle,
4289 mode == BRIDGE_MODE_VEPA ?
4290 PORT_FWD_TYPE_VEPA :
4291 PORT_FWD_TYPE_VEB);
4292 if (status)
4293 goto err;
4294
4295 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4296 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4297
4298 return status;
4299 }
4300err:
4301 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4302 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4303
4304 return status;
4305}
4306
4307static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4308 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4309{
4310 struct be_adapter *adapter = netdev_priv(dev);
4311 int status = 0;
4312 u8 hsw_mode;
4313
4314 if (!sriov_enabled(adapter))
4315 return 0;
4316
4317 /* BE and Lancer chips support VEB mode only */
4318 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4319 hsw_mode = PORT_FWD_TYPE_VEB;
4320 } else {
4321 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4322 adapter->if_handle, &hsw_mode);
4323 if (status)
4324 return 0;
4325 }
4326
4327 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4328 hsw_mode == PORT_FWD_TYPE_VEPA ?
4329 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4330}
4331
c5abe7c0 4332#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4333static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4334 __be16 port)
4335{
4336 struct be_adapter *adapter = netdev_priv(netdev);
4337 struct device *dev = &adapter->pdev->dev;
4338 int status;
4339
4340 if (lancer_chip(adapter) || BEx_chip(adapter))
4341 return;
4342
4343 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4344 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4345 be16_to_cpu(port));
4346 dev_info(dev,
4347 "Only one UDP port supported for VxLAN offloads\n");
4348 return;
4349 }
4350
4351 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4352 OP_CONVERT_NORMAL_TO_TUNNEL);
4353 if (status) {
4354 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4355 goto err;
4356 }
4357
4358 status = be_cmd_set_vxlan_port(adapter, port);
4359 if (status) {
4360 dev_warn(dev, "Failed to add VxLAN port\n");
4361 goto err;
4362 }
4363 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4364 adapter->vxlan_port = port;
4365
4366 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4367 be16_to_cpu(port));
4368 return;
4369err:
4370 be_disable_vxlan_offloads(adapter);
4371 return;
4372}
4373
4374static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4375 __be16 port)
4376{
4377 struct be_adapter *adapter = netdev_priv(netdev);
4378
4379 if (lancer_chip(adapter) || BEx_chip(adapter))
4380 return;
4381
4382 if (adapter->vxlan_port != port)
4383 return;
4384
4385 be_disable_vxlan_offloads(adapter);
4386
4387 dev_info(&adapter->pdev->dev,
4388 "Disabled VxLAN offloads for UDP port %d\n",
4389 be16_to_cpu(port));
4390}
c5abe7c0 4391#endif
c9c47142 4392
/* netdev callback table shared by all be2net interfaces (BE2/BE3,
 * Lancer, Skyhawk). Optional callbacks are conditional on the matching
 * kernel config options.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management callbacks */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	/* e-switch (VEB/VEPA) configuration */
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4422
/* Initialize netdev feature flags, ops, and ethtool ops before the
 * device is registered.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk supports VxLAN offloads: advertise checksum/TSO for
	 * encapsulated frames and UDP tunnel segmentation.
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4455
4456static void be_unmap_pci_bars(struct be_adapter *adapter)
4457{
c5b3ad4c
SP
4458 if (adapter->csr)
4459 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4460 if (adapter->db)
ce66f781 4461 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4462}
4463
ce66f781
SP
/* Doorbell BAR number: BAR 0 on Lancer and on VFs, BAR 4 otherwise. */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4471
4472static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4473{
dbf0f2a7 4474 if (skyhawk_chip(adapter)) {
ce66f781
SP
4475 adapter->roce_db.size = 4096;
4476 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4477 db_bar(adapter));
4478 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4479 db_bar(adapter));
4480 }
045508a8 4481 return 0;
6b7c5b94
SP
4482}
4483
4484static int be_map_pci_bars(struct be_adapter *adapter)
4485{
4486 u8 __iomem *addr;
fe6d2a38 4487
c5b3ad4c
SP
4488 if (BEx_chip(adapter) && be_physfn(adapter)) {
4489 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4490 if (!adapter->csr)
c5b3ad4c
SP
4491 return -ENOMEM;
4492 }
4493
ce66f781 4494 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4495 if (!addr)
6b7c5b94 4496 goto pci_map_err;
ba343c77 4497 adapter->db = addr;
ce66f781
SP
4498
4499 be_roce_map_pci_bars(adapter);
6b7c5b94 4500 return 0;
ce66f781 4501
6b7c5b94
SP
4502pci_map_err:
4503 be_unmap_pci_bars(adapter);
4504 return -ENOMEM;
4505}
4506
6b7c5b94
SP
4507static void be_ctrl_cleanup(struct be_adapter *adapter)
4508{
8788fdc2 4509 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4510
4511 be_unmap_pci_bars(adapter);
4512
4513 if (mem->va)
2b7bcebf
IV
4514 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4515 mem->dma);
e7b909a6 4516
5b8821b7 4517 mem = &adapter->rx_filter;
e7b909a6 4518 if (mem->va)
2b7bcebf
IV
4519 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4520 mem->dma);
6b7c5b94
SP
4521}
4522
6b7c5b94
SP
/* Initialize the control path: read SLI identity, map BARs, allocate the
 * (16-byte aligned) mailbox and rx-filter DMA buffers, and set up the
 * locks/completions used by the command layer. Uses goto-based cleanup.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify the SLI family and whether we are a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Save config space so it can be restored after an EEH reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4581
4582static void be_stats_cleanup(struct be_adapter *adapter)
4583{
3abcdeda 4584 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4585
4586 if (cmd->va)
2b7bcebf
IV
4587 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4588 cmd->va, cmd->dma);
6b7c5b94
SP
4589}
4590
4591static int be_stats_init(struct be_adapter *adapter)
4592{
3abcdeda 4593 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 4594
ca34fe38
SP
4595 if (lancer_chip(adapter))
4596 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4597 else if (BE2_chip(adapter))
89a88ab8 4598 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
61000861 4599 else if (BE3_chip(adapter))
ca34fe38 4600 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
61000861
AK
4601 else
4602 /* ALL non-BE ASICs */
4603 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
ca34fe38 4604
ede23fa8
JP
4605 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4606 GFP_KERNEL);
ddf1169f 4607 if (!cmd->va)
6b568689 4608 return -ENOMEM;
6b7c5b94
SP
4609 return 0;
4610}
4611
/* PCI remove callback: tear down the adapter in the reverse order of
 * probe. The recovery worker is cancelled before unregistering so no
 * recovery runs against a half-torn-down device.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4642
39f1d94d 4643static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4644{
baaa08d1 4645 int status, level;
6b7c5b94 4646
9e1453c5
AK
4647 status = be_cmd_get_cntl_attributes(adapter);
4648 if (status)
4649 return status;
4650
7aeb2156
PR
4651 /* Must be a power of 2 or else MODULO will BUG_ON */
4652 adapter->be_get_temp_freq = 64;
4653
baaa08d1
VV
4654 if (BEx_chip(adapter)) {
4655 level = be_cmd_get_fw_log_level(adapter);
4656 adapter->msg_enable =
4657 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4658 }
941a77d5 4659
92bf14ab 4660 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 4661 return 0;
6b7c5b94
SP
4662}
4663
/* Attempt to recover a Lancer function after an error: wait for the FW
 * ready state, tear the function down, clear the error state, then
 * re-run setup (and re-open if the interface was up).
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* FW must report ready before we touch the function */
	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	/* -EAGAIN means FW is still provisioning resources; the caller
	 * will retry
	 */
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4700
/* Delayed-work handler that polls for HW errors once a second and, on
 * Lancer chips, drives the function-level recovery sequence.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the device
		 * while it is being rebuilt.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4727
/* Periodic (1 s) housekeeping worker: reaps MCC completions while the
 * interface is down, kicks off async stats/temperature FW commands,
 * replenishes starved RX queues and updates EQ delay (interrupt
 * moderation).
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Query die temperature every be_get_temp_freq ticks, PF only */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4770
257a3feb 4771/* If any VFs are already enabled don't FLR the PF */
39f1d94d
SP
4772static bool be_reset_required(struct be_adapter *adapter)
4773{
257a3feb 4774 return pci_num_vf(adapter->pdev) ? false : true;
39f1d94d
SP
4775}
4776
d379142b
SP
4777static char *mc_name(struct be_adapter *adapter)
4778{
f93f160b
VV
4779 char *str = ""; /* default */
4780
4781 switch (adapter->mc_type) {
4782 case UMC:
4783 str = "UMC";
4784 break;
4785 case FLEX10:
4786 str = "FLEX10";
4787 break;
4788 case vNIC1:
4789 str = "vNIC-1";
4790 break;
4791 case nPAR:
4792 str = "nPAR";
4793 break;
4794 case UFP:
4795 str = "UFP";
4796 break;
4797 case vNIC2:
4798 str = "vNIC-2";
4799 break;
4800 default:
4801 str = "";
4802 }
4803
4804 return str;
d379142b
SP
4805}
4806
/* "PF" for the physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
4811
/* PCI probe: bring up one NIC function. Enables the PCI device, maps BARs,
 * allocates the netdev, syncs with FW, resets the function if needed,
 * creates all HW objects (be_setup) and registers the netdev. On any
 * failure, unwinds exactly the steps already completed via the goto
 * ladder below. Returns 0 on success, negative errno otherwise.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is enabled on the PF only; failure here is non-fatal */
	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Start the periodic error-detection/recovery poll */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4933
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * device (stop recovery work, close and clear the function) and put it
 * into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	/* Recovery work must not run while the device is suspended */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4958
/* Legacy PM resume callback: re-enable the PCI device, wait for FW,
 * rebuild the function and reattach the netdev; restarts the recovery
 * poll and disarms wake-on-LAN.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * in be_probe()/be_eeh_resume() — confirm whether a failure should
	 * abort the resume.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5000
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5020
/* EEH error_detected callback: on the first notification, quiesce and
 * tear down the function; tell the EEH core whether a slot reset should
 * be attempted or the device abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5059
/* EEH slot_reset callback: re-enable the PCI device after the slot reset,
 * wait for FW readiness and clear recorded error state. Returns
 * RECOVERED so the EEH core proceeds to the resume callback, or
 * DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5086
/* EEH resume callback: rebuild the function after a successful slot
 * reset (FLR, FW init, be_setup, reopen) and reattach the netdev. On
 * failure only an error is logged — the device stays detached.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic error-detection poll */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5129
/* PCI EEH (error recovery) callback table, referenced by be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5135
/* PCI driver descriptor tying the probe/remove, legacy PM, shutdown and
 * EEH callbacks to the device-ID table.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5146
5147static int __init be_init_module(void)
5148{
8e95a202
JP
5149 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5150 rx_frag_size != 2048) {
6b7c5b94
SP
5151 printk(KERN_WARNING DRV_NAME
5152 " : Module param rx_frag_size must be 2048/4096/8192."
5153 " Using 2048\n");
5154 rx_frag_size = 2048;
5155 }
6b7c5b94
SP
5156
5157 return pci_register_driver(&be_driver);
5158}
5159module_init(be_init_module);
5160
/* Module exit point: unregister the PCI driver (triggers be_remove for
 * every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);