/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

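/* Enable/disable host interrupt reporting by toggling the HOSTINTR bit
 * in the MEMBAR control register, accessed via PCI config space.
 */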
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

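/* Post 'posted' new RX buffer descriptors to HW by ringing the RQ doorbell */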
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

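/* Ring the EQ/CQ doorbell: optionally re-arm the queue and acknowledge
 * 'num_popped' processed entries.
 */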
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

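/* ndo_set_mac_address handler: program the new MAC via FW and confirm,
 * by querying FW, that it actually took effect.
 */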
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC was successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

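/* Copy the FW stats response (v0/v1/v2 layouts, depending on the chip)
 * into the driver's unified be_drv_stats block.
 */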
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

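/* Fold a wrapping 16-bit HW counter into a monotonically growing
 * 32-bit SW accumulator.
 */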
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

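/* ndo_get_stats64 handler: aggregate per-queue counters (sampled under
 * the u64_stats sync) and FW-reported error counters into rtnl stats.
 */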
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

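/* Fill the header WRB: checksum-offload, LSO, VLAN, and length bits
 * describing the skb that follows.
 */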
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0
	 * When this hack is not needed, the evt bit is set while ringing DB
	 */
	if (skip_hw_vlan)
		SET_TX_WRB_HDR_BITS(event, hdr, 1);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

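/* DMA-map the skb head and frags and post one WRB per mapped piece after
 * the header WRB; on a mapping failure, unwind the mappings done so far
 * and restore the queue head.
 */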
/* Returns the number of WRBs used up by the skb */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb, bool skip_hw_vlan)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	struct be_eth_wrb *wrb;
	dma_addr_t busaddr;
	u16 head = txq->head;

	hdr = queue_head_node(txq);
	wrb_fill_hdr(adapter, hdr, skb, wrb_cnt, skb->len, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	queue_head_inc(txq);

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(wrb_cnt, &txq->used);
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* Bring the queue back to the state it was in before this
	 * routine was invoked.
	 */
	txq->head = head;
	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	txq->head = head;
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

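/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself when HW tagging must be skipped.
 */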
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

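/* Detect the IPv6 extension-header pattern that can stall the TX path on
 * BE3 when HW VLAN tagging is requested (see be_ipv6_tx_stall_chk()).
 */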
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies the tot_len field in the IP
	 * header incorrectly when a VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes an incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If the vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lock up when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	/* Lancer, SH-R ASICs have a bug wherein packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

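/* Flush pending TX requests to HW: make the last request eventable, pad
 * with a dummy WRB if the pending count is odd (non-Lancer chips), and
 * ring the TX doorbell.
 */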
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

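/* ndo_start_xmit handler: apply HW workarounds, enqueue the skb's WRBs,
 * and ring the doorbell unless the stack has more packets queued
 * (skb->xmit_more).
 */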
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	bool skip_hw_vlan = false, flush = !skb->xmit_more;
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_queue_info *txq = &txo->q;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (unlikely(!skb))
		goto drop;

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, skip_hw_vlan);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

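/* RX filter helpers: track and toggle promiscuous modes via FW rx-filter
 * cmds, mirroring the resulting state in adapter->if_flags.
 */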
static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

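/* ndo_set_rx_mode handler: reconcile the netdev's promiscuous, multicast
 * and unicast filter state with the FW.
 */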
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

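/* SR-IOV ndo handlers: each is valid only when SR-IOV is enabled and
 * 'vf' is within the range of provisioned VFs.
 */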
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

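/* Transparent VLAN Tagging (TVT): the given VLAN is applied to all VF
 * traffic; while TVT is on, the VF's FILTMGMT privilege is revoked so it
 * cannot program its own VLAN filters.
 */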
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}

static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf + 1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

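/* Adaptive interrupt coalescing: periodically recompute each EQ's delay
 * from the RX/TX packet rate observed since the last sample.
 */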
2632bafd
SP
1555static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1556 ulong now)
6b7c5b94 1557{
2632bafd
SP
1558 aic->rx_pkts_prev = rx_pkts;
1559 aic->tx_reqs_prev = tx_pkts;
1560 aic->jiffies = now;
1561}
ac124ff9 1562
2632bafd
SP
1563static void be_eqd_update(struct be_adapter *adapter)
1564{
1565 struct be_set_eqd set_eqd[MAX_EVT_QS];
1566 int eqd, i, num = 0, start;
1567 struct be_aic_obj *aic;
1568 struct be_eq_obj *eqo;
1569 struct be_rx_obj *rxo;
1570 struct be_tx_obj *txo;
1571 u64 rx_pkts, tx_pkts;
1572 ulong now;
1573 u32 pps, delta;
10ef9ab4 1574
2632bafd
SP
1575 for_all_evt_queues(adapter, eqo, i) {
1576 aic = &adapter->aic_obj[eqo->idx];
1577 if (!aic->enable) {
1578 if (aic->jiffies)
1579 aic->jiffies = 0;
1580 eqd = aic->et_eqd;
1581 goto modify_eqd;
1582 }
6b7c5b94 1583
2632bafd
SP
1584 rxo = &adapter->rx_obj[eqo->idx];
1585 do {
57a7744e 1586 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1587 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1588 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1589
2632bafd
SP
1590 txo = &adapter->tx_obj[eqo->idx];
1591 do {
57a7744e 1592 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1593 tx_pkts = txo->stats.tx_reqs;
57a7744e 1594 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1595
2632bafd
SP
1596 /* Skip, if wrapped around or first calculation */
1597 now = jiffies;
1598 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1599 rx_pkts < aic->rx_pkts_prev ||
1600 tx_pkts < aic->tx_reqs_prev) {
1601 be_aic_update(aic, rx_pkts, tx_pkts, now);
1602 continue;
1603 }
1604
1605 delta = jiffies_to_msecs(now - aic->jiffies);
1606 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1607 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1608 eqd = (pps / 15000) << 2;
10ef9ab4 1609
2632bafd
SP
1610 if (eqd < 8)
1611 eqd = 0;
1612 eqd = min_t(u32, eqd, aic->max_eqd);
1613 eqd = max_t(u32, eqd, aic->min_eqd);
1614
1615 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1616modify_eqd:
2632bafd
SP
1617 if (eqd != aic->prev_eqd) {
1618 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1619 set_eqd[num].eq_id = eqo->q.id;
1620 aic->prev_eqd = eqd;
1621 num++;
1622 }
ac124ff9 1623 }
2632bafd
SP
1624
1625 if (num)
1626 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1627}
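
/* Sketch (for illustration only; be_eqd_from_pps() is a hypothetical
 * helper, not part of this driver): the adaptive interrupt moderation
 * above reduces to this pure function. Constants mirror be_eqd_update().
 */
static inline u32 be_eqd_from_pps(u32 pps, u32 min_eqd, u32 max_eqd)
{
	u32 eqd = (pps / 15000) << 2;	/* ~4 delay units per 15K pkts/sec */

	if (eqd < 8)
		eqd = 0;		/* low rate: no interrupt delay */
	return clamp_t(u32, eqd, min_eqd, max_eqd);
}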
1628
3abcdeda 1629static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1630 struct be_rx_compl_info *rxcp)
4097f663 1631{
ac124ff9 1632 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1633
ab1594e9 1634 u64_stats_update_begin(&stats->sync);
3abcdeda 1635 stats->rx_compl++;
2e588f84 1636 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1637 stats->rx_pkts++;
2e588f84 1638 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1639 stats->rx_mcast_pkts++;
2e588f84 1640 if (rxcp->err)
ac124ff9 1641 stats->rx_compl_err++;
ab1594e9 1642 u64_stats_update_end(&stats->sync);
4097f663
SP
1643}
1644
2e588f84 1645static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1646{
19fad86f 1647 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1648 * Also ignore ipcksm for ipv6 pkts
1649 */
2e588f84 1650 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1651 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1652}
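
/* Note: the check above trusts the hardware L4 checksum only for TCP/UDP
 * frames. IPv6 has no IP-header checksum, so ipcksm is ignored for v6 by
 * accepting (ip_csum || ipv6), and any completion error invalidates both.
 */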
1653
0b0ef1d0 1654static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1655{
10ef9ab4 1656 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1657 struct be_rx_page_info *rx_page_info;
3abcdeda 1658 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1659 u16 frag_idx = rxq->tail;
6b7c5b94 1660
3abcdeda 1661 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1662 BUG_ON(!rx_page_info->page);
1663
e50287be 1664 if (rx_page_info->last_frag) {
2b7bcebf
IV
1665 dma_unmap_page(&adapter->pdev->dev,
1666 dma_unmap_addr(rx_page_info, bus),
1667 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1668 rx_page_info->last_frag = false;
1669 } else {
1670 dma_sync_single_for_cpu(&adapter->pdev->dev,
1671 dma_unmap_addr(rx_page_info, bus),
1672 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1673 }
6b7c5b94 1674
0b0ef1d0 1675 queue_tail_inc(rxq);
6b7c5b94
SP
1676 atomic_dec(&rxq->used);
1677 return rx_page_info;
1678}
1679
1680/* Throw away the data in the Rx completion */
10ef9ab4
SP
1681static void be_rx_compl_discard(struct be_rx_obj *rxo,
1682 struct be_rx_compl_info *rxcp)
6b7c5b94 1683{
6b7c5b94 1684 struct be_rx_page_info *page_info;
2e588f84 1685 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1686
e80d9da6 1687 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1688 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1689 put_page(page_info->page);
1690 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1691 }
1692}
1693
1694/*
1695 * skb_fill_rx_data forms a complete skb for an ether frame
1696 * indicated by rxcp.
1697 */
10ef9ab4
SP
1698static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1699 struct be_rx_compl_info *rxcp)
6b7c5b94 1700{
6b7c5b94 1701 struct be_rx_page_info *page_info;
2e588f84
SP
1702 u16 i, j;
1703 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1704 u8 *start;
6b7c5b94 1705
0b0ef1d0 1706 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1707 start = page_address(page_info->page) + page_info->page_offset;
1708 prefetch(start);
1709
1710 /* Copy data in the first descriptor of this completion */
2e588f84 1711 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1712
6b7c5b94
SP
1713 skb->len = curr_frag_len;
1714 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1715 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1716 /* Complete packet has now been moved to data */
1717 put_page(page_info->page);
1718 skb->data_len = 0;
1719 skb->tail += curr_frag_len;
1720 } else {
ac1ae5f3
ED
1721 hdr_len = ETH_HLEN;
1722 memcpy(skb->data, start, hdr_len);
6b7c5b94 1723 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1724 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1725 skb_shinfo(skb)->frags[0].page_offset =
1726 page_info->page_offset + hdr_len;
748b539a
SP
1727 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1728 curr_frag_len - hdr_len);
6b7c5b94 1729 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1730 skb->truesize += rx_frag_size;
6b7c5b94
SP
1731 skb->tail += hdr_len;
1732 }
205859a2 1733 page_info->page = NULL;
6b7c5b94 1734
2e588f84
SP
1735 if (rxcp->pkt_size <= rx_frag_size) {
1736 BUG_ON(rxcp->num_rcvd != 1);
1737 return;
6b7c5b94
SP
1738 }
1739
1740 /* More frags present for this completion */
2e588f84
SP
1741 remaining = rxcp->pkt_size - curr_frag_len;
1742 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1743 page_info = get_rx_page_info(rxo);
2e588f84 1744 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1745
bd46cb6c
AK
1746 /* Coalesce all frags from the same physical page in one slot */
1747 if (page_info->page_offset == 0) {
1748 /* Fresh page */
1749 j++;
b061b39e 1750 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1751 skb_shinfo(skb)->frags[j].page_offset =
1752 page_info->page_offset;
9e903e08 1753 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1754 skb_shinfo(skb)->nr_frags++;
1755 } else {
1756 put_page(page_info->page);
1757 }
1758
9e903e08 1759 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1760 skb->len += curr_frag_len;
1761 skb->data_len += curr_frag_len;
bdb28a97 1762 skb->truesize += rx_frag_size;
2e588f84 1763 remaining -= curr_frag_len;
205859a2 1764 page_info->page = NULL;
6b7c5b94 1765 }
bd46cb6c 1766 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1767}
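
/* Worked example (illustrative): with rx_frag_size = 2048, a 5000-byte
 * frame arrives as num_rcvd = 3 frags of 2048 + 2048 + 904 bytes. Only
 * ETH_HLEN bytes are copied into the skb header; the rest is attached as
 * page frags, with frags from the same physical page merged into one slot.
 */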
1768
5be93b9a 1769/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1770static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1771 struct be_rx_compl_info *rxcp)
6b7c5b94 1772{
10ef9ab4 1773 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1774 struct net_device *netdev = adapter->netdev;
6b7c5b94 1775 struct sk_buff *skb;
89420424 1776
bb349bb4 1777 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1778 if (unlikely(!skb)) {
ac124ff9 1779 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1780 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1781 return;
1782 }
1783
10ef9ab4 1784 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1785
6332c8d3 1786 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1787 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1788 else
1789 skb_checksum_none_assert(skb);
6b7c5b94 1790
6332c8d3 1791 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1792 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1793 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1794 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1795
b6c0e89d 1796 skb->csum_level = rxcp->tunneled;
6384a4d0 1797 skb_mark_napi_id(skb, napi);
6b7c5b94 1798
343e43c0 1799 if (rxcp->vlanf)
86a9bad3 1800 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1801
1802 netif_receive_skb(skb);
6b7c5b94
SP
1803}
1804
5be93b9a 1805/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1806static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1807 struct napi_struct *napi,
1808 struct be_rx_compl_info *rxcp)
6b7c5b94 1809{
10ef9ab4 1810 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1811 struct be_rx_page_info *page_info;
5be93b9a 1812 struct sk_buff *skb = NULL;
2e588f84
SP
1813 u16 remaining, curr_frag_len;
1814 u16 i, j;
3968fa1e 1815
10ef9ab4 1816 skb = napi_get_frags(napi);
5be93b9a 1817 if (!skb) {
10ef9ab4 1818 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1819 return;
1820 }
1821
2e588f84
SP
1822 remaining = rxcp->pkt_size;
1823 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1824 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1825
1826 curr_frag_len = min(remaining, rx_frag_size);
1827
bd46cb6c
AK
1828 /* Coalesce all frags from the same physical page in one slot */
1829 if (i == 0 || page_info->page_offset == 0) {
1830 /* First frag or Fresh page */
1831 j++;
b061b39e 1832 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1833 skb_shinfo(skb)->frags[j].page_offset =
1834 page_info->page_offset;
9e903e08 1835 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1836 } else {
1837 put_page(page_info->page);
1838 }
9e903e08 1839 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1840 skb->truesize += rx_frag_size;
bd46cb6c 1841 remaining -= curr_frag_len;
6b7c5b94
SP
1842 memset(page_info, 0, sizeof(*page_info));
1843 }
bd46cb6c 1844 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1845
5be93b9a 1846 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1847 skb->len = rxcp->pkt_size;
1848 skb->data_len = rxcp->pkt_size;
5be93b9a 1849 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1850 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1851 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1852 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1853
b6c0e89d 1854 skb->csum_level = rxcp->tunneled;
6384a4d0 1855 skb_mark_napi_id(skb, napi);
5be93b9a 1856
343e43c0 1857 if (rxcp->vlanf)
86a9bad3 1858 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1859
10ef9ab4 1860 napi_gro_frags(napi);
2e588f84
SP
1861}
1862
10ef9ab4
SP
1863static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1864 struct be_rx_compl_info *rxcp)
2e588f84 1865{
c3c18bc1
SP
1866 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1867 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1868 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1869 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1870 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1871 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1872 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1873 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1874 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1875 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1876 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1877 if (rxcp->vlanf) {
c3c18bc1
SP
1878 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1879 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1880 }
c3c18bc1 1881 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1882 rxcp->tunneled =
c3c18bc1 1883 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1884}
1885
10ef9ab4
SP
1886static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1887 struct be_rx_compl_info *rxcp)
2e588f84 1888{
c3c18bc1
SP
1889 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1890 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1891 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1892 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1893 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1894 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1895 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1896 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1897 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1898 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1899 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1900 if (rxcp->vlanf) {
c3c18bc1
SP
1901 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1902 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1903 }
c3c18bc1
SP
1904 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1905 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1906}
1907
1908static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1909{
1910 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1911 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1912 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1913
2e588f84
SP
1914 	/* For checking the valid bit it is OK to use either definition, as the
1915 	 * valid bit is at the same position in both v0 and v1 Rx compls */
1916 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1917 return NULL;
6b7c5b94 1918
2e588f84
SP
1919 rmb();
1920 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1921
2e588f84 1922 if (adapter->be3_native)
10ef9ab4 1923 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1924 else
10ef9ab4 1925 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1926
e38b1706
SK
1927 if (rxcp->ip_frag)
1928 rxcp->l4_csum = 0;
1929
15d72184 1930 if (rxcp->vlanf) {
f93f160b
VV
1931 /* In QNQ modes, if qnq bit is not set, then the packet was
1932 * tagged only with the transparent outer vlan-tag and must
1933 * not be treated as a vlan packet by host
1934 */
1935 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1936 rxcp->vlanf = 0;
6b7c5b94 1937
15d72184 1938 if (!lancer_chip(adapter))
3c709f8f 1939 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1940
939cf306 1941 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1942 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1943 rxcp->vlanf = 0;
1944 }
2e588f84
SP
1945
1946 	/* As the compl has been parsed, reset it; we won't touch it again */
1947 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1948
3abcdeda 1949 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1950 return rxcp;
1951}
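
/* Note: the 'valid' dword acts as the producer flag for this CQ ring. The
 * rmb() above keeps the valid-bit load ordered before loads of the rest of
 * the entry, so a completion is never parsed while HW is still writing it;
 * clearing the dword afterwards readies the slot for reuse.
 */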
1952
1829b086 1953static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1954{
6b7c5b94 1955 u32 order = get_order(size);
1829b086 1956
6b7c5b94 1957 if (order > 0)
1829b086
ED
1958 gfp |= __GFP_COMP;
1959 return alloc_pages(gfp, order);
6b7c5b94
SP
1960}
1961
1962/*
1963  * Allocate a page, split it into fragments of size rx_frag_size and post as
1964 * receive buffers to BE
1965 */
c30d7266 1966static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1967{
3abcdeda 1968 struct be_adapter *adapter = rxo->adapter;
26d92f92 1969 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1970 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1971 struct page *pagep = NULL;
ba42fad0 1972 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1973 struct be_eth_rx_d *rxd;
1974 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1975 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1976
3abcdeda 1977 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 1978 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 1979 if (!pagep) {
1829b086 1980 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1981 if (unlikely(!pagep)) {
ac124ff9 1982 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1983 break;
1984 }
ba42fad0
IV
1985 page_dmaaddr = dma_map_page(dev, pagep, 0,
1986 adapter->big_page_size,
2b7bcebf 1987 DMA_FROM_DEVICE);
ba42fad0
IV
1988 if (dma_mapping_error(dev, page_dmaaddr)) {
1989 put_page(pagep);
1990 pagep = NULL;
d3de1540 1991 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
1992 break;
1993 }
e50287be 1994 page_offset = 0;
6b7c5b94
SP
1995 } else {
1996 get_page(pagep);
e50287be 1997 page_offset += rx_frag_size;
6b7c5b94 1998 }
e50287be 1999 page_info->page_offset = page_offset;
6b7c5b94 2000 page_info->page = pagep;
6b7c5b94
SP
2001
2002 rxd = queue_head_node(rxq);
e50287be 2003 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2004 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2005 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2006
2007 /* Any space left in the current big page for another frag? */
2008 if ((page_offset + rx_frag_size + rx_frag_size) >
2009 adapter->big_page_size) {
2010 pagep = NULL;
e50287be
SP
2011 page_info->last_frag = true;
2012 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2013 } else {
2014 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2015 }
26d92f92
SP
2016
2017 prev_page_info = page_info;
2018 queue_head_inc(rxq);
10ef9ab4 2019 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2020 }
e50287be
SP
2021
2022 /* Mark the last frag of a page when we break out of the above loop
2023 * with no more slots available in the RXQ
2024 */
2025 if (pagep) {
2026 prev_page_info->last_frag = true;
2027 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2028 }
6b7c5b94
SP
2029
2030 if (posted) {
6b7c5b94 2031 atomic_add(posted, &rxq->used);
6384a4d0
SP
2032 if (rxo->rx_post_starved)
2033 rxo->rx_post_starved = false;
c30d7266
AK
2034 do {
2035 notify = min(256u, posted);
2036 be_rxq_notify(adapter, rxq->id, notify);
2037 posted -= notify;
2038 } while (posted);
ea1dae11
SP
2039 } else if (atomic_read(&rxq->used) == 0) {
2040 /* Let be_worker replenish when memory is available */
3abcdeda 2041 rxo->rx_post_starved = true;
6b7c5b94 2042 }
6b7c5b94
SP
2043}
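
/* Example (illustrative): with the default rx_frag_size of 2048 and 4K
 * pages, big_page_size works out to 4096, so each page yields two frags.
 * Only the descriptor that maps the last frag of a page records the
 * page-wide DMA address, which is why unmapping keys off last_frag.
 */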
2044
5fb379ee 2045static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 2046{
6b7c5b94
SP
2047 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
2048
2049 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2050 return NULL;
2051
f3eb62d2 2052 rmb();
6b7c5b94
SP
2053 be_dws_le_to_cpu(txcp, sizeof(*txcp));
2054
2055 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2056
2057 queue_tail_inc(tx_cq);
2058 return txcp;
2059}
2060
3c8def97 2061static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2062 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2063{
5f07b3c5 2064 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2065 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2066 u16 frag_index, num_wrbs = 0;
2067 struct sk_buff *skb = NULL;
2068 bool unmap_skb_hdr = false;
a73b796e 2069 struct be_eth_wrb *wrb;
6b7c5b94 2070
ec43b1a6 2071 do {
5f07b3c5
SP
2072 if (sent_skbs[txq->tail]) {
2073 /* Free skb from prev req */
2074 if (skb)
2075 dev_consume_skb_any(skb);
2076 skb = sent_skbs[txq->tail];
2077 sent_skbs[txq->tail] = NULL;
2078 queue_tail_inc(txq); /* skip hdr wrb */
2079 num_wrbs++;
2080 unmap_skb_hdr = true;
2081 }
a73b796e 2082 wrb = queue_tail_node(txq);
5f07b3c5 2083 frag_index = txq->tail;
2b7bcebf 2084 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2085 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2086 unmap_skb_hdr = false;
6b7c5b94 2087 queue_tail_inc(txq);
5f07b3c5
SP
2088 num_wrbs++;
2089 } while (frag_index != last_index);
2090 dev_consume_skb_any(skb);
6b7c5b94 2091
4d586b82 2092 return num_wrbs;
6b7c5b94
SP
2093}
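
/* Note: each transmitted skb occupies one header wrb plus one wrb per
 * fragment, and sent_skbs[] is populated only at the header slot. The
 * loop above therefore skips the header, unmaps every data wrb up to
 * last_index, and returns the total wrb count for the caller to reclaim.
 */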
2094
10ef9ab4
SP
2095/* Return the number of events in the event queue */
2096static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2097{
10ef9ab4
SP
2098 struct be_eq_entry *eqe;
2099 int num = 0;
859b1e4e 2100
10ef9ab4
SP
2101 do {
2102 eqe = queue_tail_node(&eqo->q);
2103 if (eqe->evt == 0)
2104 break;
859b1e4e 2105
10ef9ab4
SP
2106 rmb();
2107 eqe->evt = 0;
2108 num++;
2109 queue_tail_inc(&eqo->q);
2110 } while (true);
2111
2112 return num;
859b1e4e
SP
2113}
2114
10ef9ab4
SP
2115/* Leaves the EQ in a disarmed state */
2116static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2117{
10ef9ab4 2118 int num = events_get(eqo);
859b1e4e 2119
10ef9ab4 2120 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2121}
2122
10ef9ab4 2123static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2124{
2125 struct be_rx_page_info *page_info;
3abcdeda
SP
2126 struct be_queue_info *rxq = &rxo->q;
2127 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2128 struct be_rx_compl_info *rxcp;
d23e946c
SP
2129 struct be_adapter *adapter = rxo->adapter;
2130 int flush_wait = 0;
6b7c5b94 2131
d23e946c
SP
2132 /* Consume pending rx completions.
2133 * Wait for the flush completion (identified by zero num_rcvd)
2134 * to arrive. Notify CQ even when there are no more CQ entries
2135 * for HW to flush partially coalesced CQ entries.
2136 * In Lancer, there is no need to wait for flush compl.
2137 */
2138 for (;;) {
2139 rxcp = be_rx_compl_get(rxo);
ddf1169f 2140 if (!rxcp) {
d23e946c
SP
2141 if (lancer_chip(adapter))
2142 break;
2143
2144 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2145 dev_warn(&adapter->pdev->dev,
2146 "did not receive flush compl\n");
2147 break;
2148 }
2149 be_cq_notify(adapter, rx_cq->id, true, 0);
2150 mdelay(1);
2151 } else {
2152 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2153 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2154 if (rxcp->num_rcvd == 0)
2155 break;
2156 }
6b7c5b94
SP
2157 }
2158
d23e946c
SP
2159 /* After cleanup, leave the CQ in unarmed state */
2160 be_cq_notify(adapter, rx_cq->id, false, 0);
2161
2162 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2163 while (atomic_read(&rxq->used) > 0) {
2164 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2165 put_page(page_info->page);
2166 memset(page_info, 0, sizeof(*page_info));
2167 }
2168 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2169 rxq->tail = 0;
2170 rxq->head = 0;
6b7c5b94
SP
2171}
2172
0ae57bb3 2173static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2174{
5f07b3c5
SP
2175 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2176 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2177 struct be_tx_obj *txo;
2178 struct be_queue_info *txq;
a8e9179a 2179 struct be_eth_tx_compl *txcp;
0ae57bb3 2180 int i, pending_txqs;
a8e9179a 2181
1a3d0717 2182 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2183 do {
0ae57bb3
SP
2184 pending_txqs = adapter->num_tx_qs;
2185
2186 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2187 cmpl = 0;
2188 num_wrbs = 0;
0ae57bb3
SP
2189 txq = &txo->q;
2190 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2191 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2192 num_wrbs += be_tx_compl_process(adapter, txo,
2193 end_idx);
2194 cmpl++;
2195 }
2196 if (cmpl) {
2197 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2198 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2199 timeo = 0;
0ae57bb3 2200 }
5f07b3c5 2201 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2202 pending_txqs--;
a8e9179a
SP
2203 }
2204
1a3d0717 2205 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2206 break;
2207
2208 mdelay(1);
2209 } while (true);
2210
5f07b3c5 2211 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2212 for_all_tx_queues(adapter, txo, i) {
2213 txq = &txo->q;
0ae57bb3 2214
5f07b3c5
SP
2215 if (atomic_read(&txq->used)) {
2216 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2217 i, atomic_read(&txq->used));
2218 notified_idx = txq->tail;
0ae57bb3 2219 end_idx = txq->tail;
5f07b3c5
SP
2220 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2221 txq->len);
2222 /* Use the tx-compl process logic to handle requests
2223 * that were not sent to the HW.
2224 */
0ae57bb3
SP
2225 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2226 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2227 BUG_ON(atomic_read(&txq->used));
2228 txo->pend_wrb_cnt = 0;
2229 /* Since hw was never notified of these requests,
2230 * reset TXQ indices
2231 */
2232 txq->head = notified_idx;
2233 txq->tail = notified_idx;
0ae57bb3 2234 }
b03388d6 2235 }
6b7c5b94
SP
2236}
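
/* Note: the polling loop above gives up only after the hardware has been
 * silent for ~10ms (ten 1ms sleeps with no new completions) or a HW error
 * is flagged; leftover wrbs are then reclaimed in software and the queue
 * indices rewound to the last position the hardware was notified of.
 */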
2237
10ef9ab4
SP
2238static void be_evt_queues_destroy(struct be_adapter *adapter)
2239{
2240 struct be_eq_obj *eqo;
2241 int i;
2242
2243 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2244 if (eqo->q.created) {
2245 be_eq_clean(eqo);
10ef9ab4 2246 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2247 napi_hash_del(&eqo->napi);
68d7bdcb 2248 netif_napi_del(&eqo->napi);
19d59aa7 2249 }
10ef9ab4
SP
2250 be_queue_free(adapter, &eqo->q);
2251 }
2252}
2253
2254static int be_evt_queues_create(struct be_adapter *adapter)
2255{
2256 struct be_queue_info *eq;
2257 struct be_eq_obj *eqo;
2632bafd 2258 struct be_aic_obj *aic;
10ef9ab4
SP
2259 int i, rc;
2260
92bf14ab
SP
2261 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2262 adapter->cfg_num_qs);
10ef9ab4
SP
2263
2264 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2265 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2266 BE_NAPI_WEIGHT);
6384a4d0 2267 napi_hash_add(&eqo->napi);
2632bafd 2268 aic = &adapter->aic_obj[i];
10ef9ab4 2269 eqo->adapter = adapter;
10ef9ab4 2270 eqo->idx = i;
2632bafd
SP
2271 aic->max_eqd = BE_MAX_EQD;
2272 aic->enable = true;
10ef9ab4
SP
2273
2274 eq = &eqo->q;
2275 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2276 sizeof(struct be_eq_entry));
10ef9ab4
SP
2277 if (rc)
2278 return rc;
2279
f2f781a7 2280 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2281 if (rc)
2282 return rc;
2283 }
1cfafab9 2284 return 0;
10ef9ab4
SP
2285}
2286
5fb379ee
SP
2287static void be_mcc_queues_destroy(struct be_adapter *adapter)
2288{
2289 struct be_queue_info *q;
5fb379ee 2290
8788fdc2 2291 q = &adapter->mcc_obj.q;
5fb379ee 2292 if (q->created)
8788fdc2 2293 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2294 be_queue_free(adapter, q);
2295
8788fdc2 2296 q = &adapter->mcc_obj.cq;
5fb379ee 2297 if (q->created)
8788fdc2 2298 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2299 be_queue_free(adapter, q);
2300}
2301
2302/* Must be called only after TX qs are created as MCC shares TX EQ */
2303static int be_mcc_queues_create(struct be_adapter *adapter)
2304{
2305 struct be_queue_info *q, *cq;
5fb379ee 2306
8788fdc2 2307 cq = &adapter->mcc_obj.cq;
5fb379ee 2308 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2309 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2310 goto err;
2311
10ef9ab4
SP
2312 /* Use the default EQ for MCC completions */
2313 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2314 goto mcc_cq_free;
2315
8788fdc2 2316 q = &adapter->mcc_obj.q;
5fb379ee
SP
2317 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2318 goto mcc_cq_destroy;
2319
8788fdc2 2320 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2321 goto mcc_q_free;
2322
2323 return 0;
2324
2325mcc_q_free:
2326 be_queue_free(adapter, q);
2327mcc_cq_destroy:
8788fdc2 2328 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2329mcc_cq_free:
2330 be_queue_free(adapter, cq);
2331err:
2332 return -1;
2333}
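
/* Note: the error path above is the usual goto unwind ladder; each label
 * releases exactly what was set up before the failing step, in reverse
 * order of creation (free MCC queue -> destroy MCC CQ -> free CQ memory).
 */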
2334
6b7c5b94
SP
2335static void be_tx_queues_destroy(struct be_adapter *adapter)
2336{
2337 struct be_queue_info *q;
3c8def97
SP
2338 struct be_tx_obj *txo;
2339 u8 i;
6b7c5b94 2340
3c8def97
SP
2341 for_all_tx_queues(adapter, txo, i) {
2342 q = &txo->q;
2343 if (q->created)
2344 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2345 be_queue_free(adapter, q);
6b7c5b94 2346
3c8def97
SP
2347 q = &txo->cq;
2348 if (q->created)
2349 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2350 be_queue_free(adapter, q);
2351 }
6b7c5b94
SP
2352}
2353
7707133c 2354static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2355{
10ef9ab4 2356 struct be_queue_info *cq, *eq;
3c8def97 2357 struct be_tx_obj *txo;
92bf14ab 2358 int status, i;
6b7c5b94 2359
92bf14ab 2360 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2361
10ef9ab4
SP
2362 for_all_tx_queues(adapter, txo, i) {
2363 cq = &txo->cq;
2364 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2365 sizeof(struct be_eth_tx_compl));
2366 if (status)
2367 return status;
3c8def97 2368
827da44c
JS
2369 u64_stats_init(&txo->stats.sync);
2370 u64_stats_init(&txo->stats.sync_compl);
2371
10ef9ab4
SP
2372 /* If num_evt_qs is less than num_tx_qs, then more than
2373 		 * one txq shares an eq
2374 */
2375 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2376 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2377 if (status)
2378 return status;
6b7c5b94 2379
10ef9ab4
SP
2380 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2381 sizeof(struct be_eth_wrb));
2382 if (status)
2383 return status;
6b7c5b94 2384
94d73aaa 2385 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2386 if (status)
2387 return status;
3c8def97 2388 }
6b7c5b94 2389
d379142b
SP
2390 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2391 adapter->num_tx_qs);
10ef9ab4 2392 return 0;
6b7c5b94
SP
2393}
2394
10ef9ab4 2395static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2396{
2397 struct be_queue_info *q;
3abcdeda
SP
2398 struct be_rx_obj *rxo;
2399 int i;
2400
2401 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2402 q = &rxo->cq;
2403 if (q->created)
2404 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2405 be_queue_free(adapter, q);
ac6a0c4a
SP
2406 }
2407}
2408
10ef9ab4 2409static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2410{
10ef9ab4 2411 struct be_queue_info *eq, *cq;
3abcdeda
SP
2412 struct be_rx_obj *rxo;
2413 int rc, i;
6b7c5b94 2414
92bf14ab
SP
2415 /* We can create as many RSS rings as there are EQs. */
2416 adapter->num_rx_qs = adapter->num_evt_qs;
2417
2418 	/* We'll use RSS only if at least 2 RSS rings are supported.
2419 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2420 */
92bf14ab
SP
2421 if (adapter->num_rx_qs > 1)
2422 adapter->num_rx_qs++;
2423
6b7c5b94 2424 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2425 for_all_rx_queues(adapter, rxo, i) {
2426 rxo->adapter = adapter;
3abcdeda
SP
2427 cq = &rxo->cq;
2428 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2429 sizeof(struct be_eth_rx_compl));
3abcdeda 2430 if (rc)
10ef9ab4 2431 return rc;
3abcdeda 2432
827da44c 2433 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2434 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2435 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2436 if (rc)
10ef9ab4 2437 return rc;
3abcdeda 2438 }
6b7c5b94 2439
d379142b
SP
2440 dev_info(&adapter->pdev->dev,
2441 "created %d RSS queue(s) and 1 default RX queue\n",
2442 adapter->num_rx_qs - 1);
10ef9ab4 2443 return 0;
b628bde2
SP
2444}
2445
6b7c5b94
SP
2446static irqreturn_t be_intx(int irq, void *dev)
2447{
e49cc34f
SP
2448 struct be_eq_obj *eqo = dev;
2449 struct be_adapter *adapter = eqo->adapter;
2450 int num_evts = 0;
6b7c5b94 2451
d0b9cec3
SP
2452 /* IRQ is not expected when NAPI is scheduled as the EQ
2453 * will not be armed.
2454 * But, this can happen on Lancer INTx where it takes
2455 	 * a while to de-assert INTx or in BE2 where occasionally
2456 * an interrupt may be raised even when EQ is unarmed.
2457 * If NAPI is already scheduled, then counting & notifying
2458 * events will orphan them.
e49cc34f 2459 */
d0b9cec3 2460 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2461 num_evts = events_get(eqo);
d0b9cec3
SP
2462 __napi_schedule(&eqo->napi);
2463 if (num_evts)
2464 eqo->spurious_intr = 0;
2465 }
2466 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2467
d0b9cec3
SP
2468 	/* Return IRQ_HANDLED only for the first spurious intr
2469 * after a valid intr to stop the kernel from branding
2470 * this irq as a bad one!
e49cc34f 2471 */
d0b9cec3
SP
2472 if (num_evts || eqo->spurious_intr++ == 0)
2473 return IRQ_HANDLED;
2474 else
2475 return IRQ_NONE;
6b7c5b94
SP
2476}
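
/* Note: returning IRQ_NONE for repeated event-less interrupts lets the
 * kernel's spurious-IRQ logic disable a genuinely stuck line, while the
 * single IRQ_HANDLED grace return avoids penalising the one benign
 * spurious interrupt that can follow a valid one on BE2/Lancer INTx.
 */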
2477
10ef9ab4 2478static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2479{
10ef9ab4 2480 struct be_eq_obj *eqo = dev;
6b7c5b94 2481
0b545a62
SP
2482 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2483 napi_schedule(&eqo->napi);
6b7c5b94
SP
2484 return IRQ_HANDLED;
2485}
2486
2e588f84 2487static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2488{
e38b1706 2489 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2490}
2491
10ef9ab4 2492static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2493 int budget, int polling)
6b7c5b94 2494{
3abcdeda
SP
2495 struct be_adapter *adapter = rxo->adapter;
2496 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2497 struct be_rx_compl_info *rxcp;
6b7c5b94 2498 u32 work_done;
c30d7266 2499 u32 frags_consumed = 0;
6b7c5b94
SP
2500
2501 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2502 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2503 if (!rxcp)
2504 break;
2505
12004ae9
SP
2506 /* Is it a flush compl that has no data */
2507 if (unlikely(rxcp->num_rcvd == 0))
2508 goto loop_continue;
2509
2510 /* Discard compl with partial DMA Lancer B0 */
2511 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2512 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2513 goto loop_continue;
2514 }
2515
2516 /* On BE drop pkts that arrive due to imperfect filtering in
2517 		 * promiscuous mode on some SKUs
2518 */
2519 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2520 !lancer_chip(adapter))) {
10ef9ab4 2521 be_rx_compl_discard(rxo, rxcp);
12004ae9 2522 goto loop_continue;
64642811 2523 }
009dd872 2524
6384a4d0
SP
2525 /* Don't do gro when we're busy_polling */
2526 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2527 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2528 else
6384a4d0
SP
2529 be_rx_compl_process(rxo, napi, rxcp);
2530
12004ae9 2531loop_continue:
c30d7266 2532 frags_consumed += rxcp->num_rcvd;
2e588f84 2533 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2534 }
2535
10ef9ab4
SP
2536 if (work_done) {
2537 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2538
6384a4d0
SP
2539 /* When an rx-obj gets into post_starved state, just
2540 * let be_worker do the posting.
2541 */
2542 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2543 !rxo->rx_post_starved)
c30d7266
AK
2544 be_post_rx_frags(rxo, GFP_ATOMIC,
2545 max_t(u32, MAX_RX_POST,
2546 frags_consumed));
6b7c5b94 2547 }
10ef9ab4 2548
6b7c5b94
SP
2549 return work_done;
2550}
2551
512bb8a2
KA
2552static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2553{
2554 switch (status) {
2555 case BE_TX_COMP_HDR_PARSE_ERR:
2556 tx_stats(txo)->tx_hdr_parse_err++;
2557 break;
2558 case BE_TX_COMP_NDMA_ERR:
2559 tx_stats(txo)->tx_dma_err++;
2560 break;
2561 case BE_TX_COMP_ACL_ERR:
2562 tx_stats(txo)->tx_spoof_check_err++;
2563 break;
2564 }
2565}
2566
2567static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2568{
2569 switch (status) {
2570 case LANCER_TX_COMP_LSO_ERR:
2571 tx_stats(txo)->tx_tso_err++;
2572 break;
2573 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2574 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2575 tx_stats(txo)->tx_spoof_check_err++;
2576 break;
2577 case LANCER_TX_COMP_QINQ_ERR:
2578 tx_stats(txo)->tx_qinq_err++;
2579 break;
2580 case LANCER_TX_COMP_PARITY_ERR:
2581 tx_stats(txo)->tx_internal_parity_err++;
2582 break;
2583 case LANCER_TX_COMP_DMA_ERR:
2584 tx_stats(txo)->tx_dma_err++;
2585 break;
2586 }
2587}
2588
c8f64615
SP
2589static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2590 int idx)
6b7c5b94 2591{
6b7c5b94 2592 struct be_eth_tx_compl *txcp;
c8f64615 2593 int num_wrbs = 0, work_done = 0;
512bb8a2 2594 u32 compl_status;
c8f64615
SP
2595 u16 last_idx;
2596
2597 while ((txcp = be_tx_compl_get(&txo->cq))) {
2598 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2599 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2600 work_done++;
3c8def97 2601
512bb8a2
KA
2602 compl_status = GET_TX_COMPL_BITS(status, txcp);
2603 if (compl_status) {
2604 if (lancer_chip(adapter))
2605 lancer_update_tx_err(txo, compl_status);
2606 else
2607 be_update_tx_err(txo, compl_status);
2608 }
10ef9ab4 2609 }
6b7c5b94 2610
10ef9ab4
SP
2611 if (work_done) {
2612 be_cq_notify(adapter, txo->cq.id, true, work_done);
2613 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2614
10ef9ab4
SP
2615 /* As Tx wrbs have been freed up, wake up netdev queue
2616 * if it was stopped due to lack of tx wrbs. */
2617 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2618 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2619 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2620 }
10ef9ab4
SP
2621
2622 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2623 tx_stats(txo)->tx_compl += work_done;
2624 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2625 }
10ef9ab4 2626}
6b7c5b94 2627
f7062ee5
SP
2628#ifdef CONFIG_NET_RX_BUSY_POLL
2629static inline bool be_lock_napi(struct be_eq_obj *eqo)
2630{
2631 bool status = true;
2632
2633 spin_lock(&eqo->lock); /* BH is already disabled */
2634 if (eqo->state & BE_EQ_LOCKED) {
2635 WARN_ON(eqo->state & BE_EQ_NAPI);
2636 eqo->state |= BE_EQ_NAPI_YIELD;
2637 status = false;
2638 } else {
2639 eqo->state = BE_EQ_NAPI;
2640 }
2641 spin_unlock(&eqo->lock);
2642 return status;
2643}
2644
2645static inline void be_unlock_napi(struct be_eq_obj *eqo)
2646{
2647 spin_lock(&eqo->lock); /* BH is already disabled */
2648
2649 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2650 eqo->state = BE_EQ_IDLE;
2651
2652 spin_unlock(&eqo->lock);
2653}
2654
2655static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2656{
2657 bool status = true;
2658
2659 spin_lock_bh(&eqo->lock);
2660 if (eqo->state & BE_EQ_LOCKED) {
2661 eqo->state |= BE_EQ_POLL_YIELD;
2662 status = false;
2663 } else {
2664 eqo->state |= BE_EQ_POLL;
2665 }
2666 spin_unlock_bh(&eqo->lock);
2667 return status;
2668}
2669
2670static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2671{
2672 spin_lock_bh(&eqo->lock);
2673
2674 WARN_ON(eqo->state & (BE_EQ_NAPI));
2675 eqo->state = BE_EQ_IDLE;
2676
2677 spin_unlock_bh(&eqo->lock);
2678}
2679
2680static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2681{
2682 spin_lock_init(&eqo->lock);
2683 eqo->state = BE_EQ_IDLE;
2684}
2685
2686static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2687{
2688 local_bh_disable();
2689
2690 /* It's enough to just acquire napi lock on the eqo to stop
2691 	 * be_busy_poll() from processing any queues.
2692 */
2693 while (!be_lock_napi(eqo))
2694 mdelay(1);
2695
2696 local_bh_enable();
2697}
2698
2699#else /* CONFIG_NET_RX_BUSY_POLL */
2700
2701static inline bool be_lock_napi(struct be_eq_obj *eqo)
2702{
2703 return true;
2704}
2705
2706static inline void be_unlock_napi(struct be_eq_obj *eqo)
2707{
2708}
2709
2710static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2711{
2712 return false;
2713}
2714
2715static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2716{
2717}
2718
2719static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2720{
2721}
2722
2723static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2724{
2725}
2726#endif /* CONFIG_NET_RX_BUSY_POLL */
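
/* Note: eqo->state is a small lock-protected state machine (IDLE / NAPI /
 * POLL plus *_YIELD flags) that lets NAPI and busy-poll contend for the
 * same queues without ever running concurrently; the loser records a
 * yield and backs off instead of blocking.
 */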
2727
68d7bdcb 2728int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2729{
2730 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2731 struct be_adapter *adapter = eqo->adapter;
0b545a62 2732 int max_work = 0, work, i, num_evts;
6384a4d0 2733 struct be_rx_obj *rxo;
a4906ea0 2734 struct be_tx_obj *txo;
f31e50a8 2735
0b545a62
SP
2736 num_evts = events_get(eqo);
2737
a4906ea0
SP
2738 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2739 be_process_tx(adapter, txo, i);
f31e50a8 2740
6384a4d0
SP
2741 if (be_lock_napi(eqo)) {
2742 /* This loop will iterate twice for EQ0 in which
2743 * completions of the last RXQ (default one) are also processed
2744 * For other EQs the loop iterates only once
2745 */
2746 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2747 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2748 max_work = max(work, max_work);
2749 }
2750 be_unlock_napi(eqo);
2751 } else {
2752 max_work = budget;
10ef9ab4 2753 }
6b7c5b94 2754
10ef9ab4
SP
2755 if (is_mcc_eqo(eqo))
2756 be_process_mcc(adapter);
93c86700 2757
10ef9ab4
SP
2758 if (max_work < budget) {
2759 napi_complete(napi);
0b545a62 2760 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2761 } else {
2762 /* As we'll continue in polling mode, count and clear events */
0b545a62 2763 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2764 }
10ef9ab4 2765 return max_work;
6b7c5b94
SP
2766}
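
/* Note: standard NAPI contract. When fewer than 'budget' packets were
 * processed, polling is complete and the EQ is re-armed so the next event
 * raises an interrupt; otherwise the EQ stays unarmed and NAPI keeps
 * polling. Consumed events (num_evts) are acked in either case.
 */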
2767
6384a4d0
SP
2768#ifdef CONFIG_NET_RX_BUSY_POLL
2769static int be_busy_poll(struct napi_struct *napi)
2770{
2771 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2772 struct be_adapter *adapter = eqo->adapter;
2773 struct be_rx_obj *rxo;
2774 int i, work = 0;
2775
2776 if (!be_lock_busy_poll(eqo))
2777 return LL_FLUSH_BUSY;
2778
2779 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2780 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2781 if (work)
2782 break;
2783 }
2784
2785 be_unlock_busy_poll(eqo);
2786 return work;
2787}
2788#endif
2789
f67ef7ba 2790void be_detect_error(struct be_adapter *adapter)
7c185276 2791{
e1cfb67a
PR
2792 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2793 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2794 u32 i;
eb0eecc1
SK
2795 bool error_detected = false;
2796 struct device *dev = &adapter->pdev->dev;
2797 struct net_device *netdev = adapter->netdev;
7c185276 2798
d23e946c 2799 if (be_hw_error(adapter))
72f02485
SP
2800 return;
2801
e1cfb67a
PR
2802 if (lancer_chip(adapter)) {
2803 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2804 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2805 sliport_err1 = ioread32(adapter->db +
748b539a 2806 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2807 sliport_err2 = ioread32(adapter->db +
748b539a 2808 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2809 adapter->hw_error = true;
2810 /* Do not log error messages if its a FW reset */
2811 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2812 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2813 dev_info(dev, "Firmware update in progress\n");
2814 } else {
2815 error_detected = true;
2816 dev_err(dev, "Error detected in the card\n");
2817 dev_err(dev, "ERR: sliport status 0x%x\n",
2818 sliport_status);
2819 dev_err(dev, "ERR: sliport error1 0x%x\n",
2820 sliport_err1);
2821 dev_err(dev, "ERR: sliport error2 0x%x\n",
2822 sliport_err2);
2823 }
e1cfb67a
PR
2824 }
2825 } else {
25848c90
SR
2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2828 ue_lo_mask = ioread32(adapter->pcicfg +
2829 PCICFG_UE_STATUS_LOW_MASK);
2830 ue_hi_mask = ioread32(adapter->pcicfg +
2831 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 2832
f67ef7ba
PR
2833 ue_lo = (ue_lo & ~ue_lo_mask);
2834 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2835
eb0eecc1
SK
2836 /* On certain platforms BE hardware can indicate spurious UEs.
2837 * Allow HW to stop working completely in case of a real UE.
2838 * Hence not setting the hw_error for UE detection.
2839 */
f67ef7ba 2840
eb0eecc1
SK
2841 if (ue_lo || ue_hi) {
2842 error_detected = true;
2843 dev_err(dev,
2844 "Unrecoverable Error detected in the adapter");
2845 dev_err(dev, "Please reboot server to recover");
2846 if (skyhawk_chip(adapter))
2847 adapter->hw_error = true;
2848 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2849 if (ue_lo & 1)
2850 dev_err(dev, "UE: %s bit set\n",
2851 ue_status_low_desc[i]);
2852 }
2853 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2854 if (ue_hi & 1)
2855 dev_err(dev, "UE: %s bit set\n",
2856 ue_status_hi_desc[i]);
2857 }
7c185276
AK
2858 }
2859 }
eb0eecc1
SK
2860 if (error_detected)
2861 netif_carrier_off(netdev);
7c185276
AK
2862}
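
/* Illustrative decode: if the masked ue_lo were 0x5, bits 0 and 2 are set,
 * so the loop above would log "UE: CEV bit set" and "UE: DBUF bit set"
 * from ue_status_low_desc[].
 */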
2863
8d56ff11
SP
2864static void be_msix_disable(struct be_adapter *adapter)
2865{
ac6a0c4a 2866 if (msix_enabled(adapter)) {
8d56ff11 2867 pci_disable_msix(adapter->pdev);
ac6a0c4a 2868 adapter->num_msix_vec = 0;
68d7bdcb 2869 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2870 }
2871}
2872
c2bba3df 2873static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2874{
7dc4c064 2875 int i, num_vec;
d379142b 2876 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2877
92bf14ab
SP
2878 /* If RoCE is supported, program the max number of NIC vectors that
2879 * may be configured via set-channels, along with vectors needed for
2880 	 * RoCE. Else, just program the number we'll use initially.
2881 */
2882 if (be_roce_supported(adapter))
2883 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2884 2 * num_online_cpus());
2885 else
2886 num_vec = adapter->cfg_num_qs;
3abcdeda 2887
ac6a0c4a 2888 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2889 adapter->msix_entries[i].entry = i;
2890
7dc4c064
AG
2891 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2892 MIN_MSIX_VECTORS, num_vec);
2893 if (num_vec < 0)
2894 goto fail;
92bf14ab 2895
92bf14ab
SP
2896 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2897 adapter->num_msix_roce_vec = num_vec / 2;
2898 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2899 adapter->num_msix_roce_vec);
2900 }
2901
2902 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2903
2904 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2905 adapter->num_msix_vec);
c2bba3df 2906 return 0;
7dc4c064
AG
2907
2908fail:
2909 dev_warn(dev, "MSIx enable failed\n");
2910
2911 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2912 if (!be_physfn(adapter))
2913 return num_vec;
2914 return 0;
6b7c5b94
SP
2915}
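
/* Worked example (illustrative): with RoCE supported, be_max_eqs() = 4 and
 * 8 online CPUs, num_vec starts at min(8, 16) = 8. If all 8 vectors are
 * granted, half (4) are reserved for RoCE and the NIC keeps the rest.
 */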
2916
fe6d2a38 2917static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2918 struct be_eq_obj *eqo)
b628bde2 2919{
f2f781a7 2920 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2921}
6b7c5b94 2922
b628bde2
SP
2923static int be_msix_register(struct be_adapter *adapter)
2924{
10ef9ab4
SP
2925 struct net_device *netdev = adapter->netdev;
2926 struct be_eq_obj *eqo;
2927 int status, i, vec;
6b7c5b94 2928
10ef9ab4
SP
2929 for_all_evt_queues(adapter, eqo, i) {
2930 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2931 vec = be_msix_vec_get(adapter, eqo);
2932 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2933 if (status)
2934 goto err_msix;
2935 }
b628bde2 2936
6b7c5b94 2937 return 0;
3abcdeda 2938err_msix:
10ef9ab4
SP
2939 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2940 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2941 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2942 status);
ac6a0c4a 2943 be_msix_disable(adapter);
6b7c5b94
SP
2944 return status;
2945}
2946
2947static int be_irq_register(struct be_adapter *adapter)
2948{
2949 struct net_device *netdev = adapter->netdev;
2950 int status;
2951
ac6a0c4a 2952 if (msix_enabled(adapter)) {
6b7c5b94
SP
2953 status = be_msix_register(adapter);
2954 if (status == 0)
2955 goto done;
ba343c77
SB
2956 /* INTx is not supported for VF */
2957 if (!be_physfn(adapter))
2958 return status;
6b7c5b94
SP
2959 }
2960
e49cc34f 2961 /* INTx: only the first EQ is used */
6b7c5b94
SP
2962 netdev->irq = adapter->pdev->irq;
2963 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2964 &adapter->eq_obj[0]);
6b7c5b94
SP
2965 if (status) {
2966 dev_err(&adapter->pdev->dev,
2967 "INTx request IRQ failed - err %d\n", status);
2968 return status;
2969 }
2970done:
2971 adapter->isr_registered = true;
2972 return 0;
2973}
2974
2975static void be_irq_unregister(struct be_adapter *adapter)
2976{
2977 struct net_device *netdev = adapter->netdev;
10ef9ab4 2978 struct be_eq_obj *eqo;
3abcdeda 2979 int i;
6b7c5b94
SP
2980
2981 if (!adapter->isr_registered)
2982 return;
2983
2984 /* INTx */
ac6a0c4a 2985 if (!msix_enabled(adapter)) {
e49cc34f 2986 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2987 goto done;
2988 }
2989
2990 /* MSIx */
10ef9ab4
SP
2991 for_all_evt_queues(adapter, eqo, i)
2992 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2993
6b7c5b94
SP
2994done:
2995 adapter->isr_registered = false;
6b7c5b94
SP
2996}
2997
10ef9ab4 2998static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2999{
3000 struct be_queue_info *q;
3001 struct be_rx_obj *rxo;
3002 int i;
3003
3004 for_all_rx_queues(adapter, rxo, i) {
3005 q = &rxo->q;
3006 if (q->created) {
3007 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3008 be_rx_cq_clean(rxo);
482c9e79 3009 }
10ef9ab4 3010 be_queue_free(adapter, q);
482c9e79
SP
3011 }
3012}
3013
889cd4b2
SP
3014static int be_close(struct net_device *netdev)
3015{
3016 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3017 struct be_eq_obj *eqo;
3018 int i;
889cd4b2 3019
e1ad8e33
KA
3020 /* This protection is needed as be_close() may be called even when the
3021 * adapter is in cleared state (after eeh perm failure)
3022 */
3023 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3024 return 0;
3025
045508a8
PP
3026 be_roce_dev_close(adapter);
3027
dff345c5
IV
3028 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3029 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3030 napi_disable(&eqo->napi);
6384a4d0
SP
3031 be_disable_busy_poll(eqo);
3032 }
71237b6f 3033 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3034 }
a323d9bf
SP
3035
3036 be_async_mcc_disable(adapter);
3037
3038 /* Wait for all pending tx completions to arrive so that
3039 * all tx skbs are freed.
3040 */
fba87559 3041 netif_tx_disable(netdev);
6e1f9975 3042 be_tx_compl_clean(adapter);
a323d9bf
SP
3043
3044 be_rx_qs_destroy(adapter);
f66b7cfd 3045 be_clear_uc_list(adapter);
d11a347d 3046
a323d9bf 3047 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3048 if (msix_enabled(adapter))
3049 synchronize_irq(be_msix_vec_get(adapter, eqo));
3050 else
3051 synchronize_irq(netdev->irq);
3052 be_eq_clean(eqo);
63fcb27f
PR
3053 }
3054
889cd4b2
SP
3055 be_irq_unregister(adapter);
3056
482c9e79
SP
3057 return 0;
3058}
3059
10ef9ab4 3060static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3061{
1dcf7b1c
ED
3062 struct rss_info *rss = &adapter->rss_info;
3063 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3064 struct be_rx_obj *rxo;
e9008ee9 3065 int rc, i, j;
482c9e79
SP
3066
3067 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3068 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3069 sizeof(struct be_eth_rx_d));
3070 if (rc)
3071 return rc;
3072 }
3073
3074 /* The FW would like the default RXQ to be created first */
3075 rxo = default_rxo(adapter);
3076 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3077 adapter->if_handle, false, &rxo->rss_id);
3078 if (rc)
3079 return rc;
3080
3081 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3082 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3083 rx_frag_size, adapter->if_handle,
3084 true, &rxo->rss_id);
482c9e79
SP
3085 if (rc)
3086 return rc;
3087 }
3088
3089 if (be_multi_rxq(adapter)) {
e2557877
VD
3090 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3091 j += adapter->num_rx_qs - 1) {
e9008ee9 3092 for_all_rss_queues(adapter, rxo, i) {
e2557877 3093 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3094 break;
e2557877
VD
3095 rss->rsstable[j + i] = rxo->rss_id;
3096 rss->rss_queue[j + i] = i;
e9008ee9
PR
3097 }
3098 }
e2557877
VD
3099 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3100 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3101
3102 if (!BEx_chip(adapter))
e2557877
VD
3103 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3104 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3105 } else {
3106 		/* Disable RSS if only the default RX queue is created */
e2557877 3107 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3108 }
594ad54a 3109
1dcf7b1c 3110 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3111 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3112 128, rss_key);
da1388d6 3113 if (rc) {
e2557877 3114 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3115 return rc;
482c9e79
SP
3116 }
3117
1dcf7b1c 3118 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3119
482c9e79 3120 /* First time posting */
10ef9ab4 3121 for_all_rx_queues(adapter, rxo, i)
c30d7266 3122 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3123 return 0;
3124}
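
/* Illustrative RSS layout: with num_rx_qs = 5 (one default RXQ + 4 RSS
 * rings), the 128-entry indirection table above is filled round-robin, so
 * rsstable[0..3] holds the rss_ids of rings 0..3, rsstable[4..7] repeats
 * them, and so on across the whole table.
 */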
3125
6b7c5b94
SP
3126static int be_open(struct net_device *netdev)
3127{
3128 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3129 struct be_eq_obj *eqo;
3abcdeda 3130 struct be_rx_obj *rxo;
10ef9ab4 3131 struct be_tx_obj *txo;
b236916a 3132 u8 link_status;
3abcdeda 3133 int status, i;
5fb379ee 3134
10ef9ab4 3135 status = be_rx_qs_create(adapter);
482c9e79
SP
3136 if (status)
3137 goto err;
3138
c2bba3df
SK
3139 status = be_irq_register(adapter);
3140 if (status)
3141 goto err;
5fb379ee 3142
10ef9ab4 3143 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3144 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3145
10ef9ab4
SP
3146 for_all_tx_queues(adapter, txo, i)
3147 be_cq_notify(adapter, txo->cq.id, true, 0);
3148
7a1e9b20
SP
3149 be_async_mcc_enable(adapter);
3150
10ef9ab4
SP
3151 for_all_evt_queues(adapter, eqo, i) {
3152 napi_enable(&eqo->napi);
6384a4d0 3153 be_enable_busy_poll(eqo);
4cad9f3b 3154 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3155 }
04d3d624 3156 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3157
323ff71e 3158 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3159 if (!status)
3160 be_link_status_update(adapter, link_status);
3161
fba87559 3162 netif_tx_start_all_queues(netdev);
045508a8 3163 be_roce_dev_open(adapter);
c9c47142 3164
c5abe7c0 3165#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3166 if (skyhawk_chip(adapter))
3167 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3168#endif
3169
889cd4b2
SP
3170 return 0;
3171err:
3172 be_close(adapter->netdev);
3173 return -EIO;
5fb379ee
SP
3174}
3175
71d8d1b5
AK
3176static int be_setup_wol(struct be_adapter *adapter, bool enable)
3177{
3178 struct be_dma_mem cmd;
3179 int status = 0;
3180 u8 mac[ETH_ALEN];
3181
3182 memset(mac, 0, ETH_ALEN);
3183
3184 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3185 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3186 GFP_KERNEL);
ddf1169f 3187 if (!cmd.va)
6b568689 3188 return -ENOMEM;
71d8d1b5
AK
3189
3190 if (enable) {
3191 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3192 PCICFG_PM_CONTROL_OFFSET,
3193 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3194 if (status) {
3195 dev_err(&adapter->pdev->dev,
2381a55c 3196 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3197 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3198 cmd.dma);
71d8d1b5
AK
3199 return status;
3200 }
3201 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3202 adapter->netdev->dev_addr,
3203 &cmd);
71d8d1b5
AK
3204 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3205 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3206 } else {
3207 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3208 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3209 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3210 }
3211
2b7bcebf 3212 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3213 return status;
3214}
3215
f7062ee5
SP
3216static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3217{
3218 u32 addr;
3219
3220 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3221
3222 mac[5] = (u8)(addr & 0xFF);
3223 mac[4] = (u8)((addr >> 8) & 0xFF);
3224 mac[3] = (u8)((addr >> 16) & 0xFF);
3225 /* Use the OUI from the current MAC address */
3226 memcpy(mac, adapter->netdev->dev_addr, 3);
3227}
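
/* Note: the seed MAC keeps the PF's 3-byte OUI and derives the low three
 * bytes from a jhash of the PF MAC. be_vf_eth_addr_config() then hands out
 * consecutive addresses by bumping mac[5] once per VF.
 */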
3228
6d87f5c3
AK
3229/*
3230 * Generate a seed MAC address from the PF MAC Address using jhash.
3231  * MAC addresses for VFs are assigned incrementally starting from the seed.
3232 * These addresses are programmed in the ASIC by the PF and the VF driver
3233 * queries for the MAC address during its probe.
3234 */
4c876616 3235static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3236{
f9449ab7 3237 u32 vf;
3abcdeda 3238 int status = 0;
6d87f5c3 3239 u8 mac[ETH_ALEN];
11ac75ed 3240 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3241
3242 be_vf_eth_addr_generate(adapter, mac);
3243
11ac75ed 3244 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3245 if (BEx_chip(adapter))
590c391d 3246 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3247 vf_cfg->if_handle,
3248 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3249 else
3250 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3251 vf + 1);
590c391d 3252
6d87f5c3
AK
3253 if (status)
3254 dev_err(&adapter->pdev->dev,
748b539a
SP
3255 "Mac address assignment failed for VF %d\n",
3256 vf);
6d87f5c3 3257 else
11ac75ed 3258 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3259
3260 mac[5] += 1;
3261 }
3262 return status;
3263}
3264
4c876616
SP
3265static int be_vfs_mac_query(struct be_adapter *adapter)
3266{
3267 int status, vf;
3268 u8 mac[ETH_ALEN];
3269 struct be_vf_cfg *vf_cfg;
4c876616
SP
3270
3271 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3272 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3273 mac, vf_cfg->if_handle,
3274 false, vf+1);
4c876616
SP
3275 if (status)
3276 return status;
3277 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3278 }
3279 return 0;
3280}

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

static void be_mac_clear(struct be_adapter *adapter)
{
	if (adapter->pmac_id) {
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[0], 0);
		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
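
/*
 * Teardown order above mirrors be_setup() roughly in reverse: cancel the
 * worker, tear down VFs, VxLAN offloads and MAC filters, destroy the
 * interface and queues, and finally release MSI-x vectors.
 */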

static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
			u32 cap_flags, u32 vf)
{
	u32 en_flags;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
		   BE_IF_FLAGS_RSS;

	en_flags &= cap_flags;

	return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}

static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, vf;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		status = be_if_create(adapter, &vf_cfg->if_handle,
				      cap_flags, vf + 1);
		if (status)
			return status;
	}

	return 0;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}
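
	/*
	 * Worked example (assuming BE_NUM_VLANS_SUPPORTED is 64): a QnQ
	 * multi-channel function gets 64 / 8 = 8 VLAN filter entries, a
	 * non-QnQ multi-channel function gets 64 / 4 - 1 = 15 (one entry
	 * is consumed by the pvid), and a non-multi-channel function
	 * keeps all 64.
	 */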

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->if_flags = 0;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev, "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per-function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in the Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0, i;

	i = sscanf(fw_ver, "%d.", &fw_major);
	if (i != 1)
		return 0;

	return fw_major;
}
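
/*
 * Example: fw_major_num("4.9.324.0") returns 4; a version string that
 * does not begin with an integer returns 0, which be_setup() below
 * treats like "old/unknown" when warning about pre-4.0 BE2 firmware.
 */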

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	status = be_if_create(adapter, &adapter->if_handle,
			      be_if_cap_flags(adapter), 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
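
/*
 * Note: the 32-byte cookie ("*** SE FLASH DIRECTORY *** ") is split into
 * two 16-byte halves, presumably so the driver binary never contains the
 * contiguous magic string and cannot itself match a cookie scan;
 * get_fsec_info() below compares all sizeof(flash_cookie) bytes at once.
 */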

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
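
/*
 * Locating the section info, schematically (layout assumed from the
 * parsing code, not from a format spec):
 *
 *	[file hdr][image hdrs ...][ ...32-byte steps... ][cookie|fsec data]
 *
 * get_fsec_info() starts right after the headers and advances 32 bytes
 * at a time until the 32-byte flash_cookie matches.
 */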

static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing if the crc of the flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
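
/*
 * Example: a 100KB image is written as 32KB + 32KB + 32KB + 4KB chunks.
 * All but the last chunk use a SAVE op (which the FW appears to stage),
 * and the final chunk switches to a FLASH op that commits the image; for
 * PHY FW the PHY_SAVE/PHY_FLASH variants are used instead.
 */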

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype,
				  img_size, img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be a multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
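
/*
 * Commit convention used above: once the image has been streamed in
 * 32KB chunks, a zero-length write to the same object ("/prg") signals
 * the FW that the download is complete and can be validated/activated.
 */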

#define BE2_UFI		2
#define BE3_UFI		3
#define BE3R_UFI	10
#define SH_UFI		4
#define SH_P2_UFI	11

static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return -1;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
							      SH_UFI;
	case BLD_STR_UFI_TYPE_BE3:
		return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
							      BE3_UFI;
	case BLD_STR_UFI_TYPE_BE2:
		return BE2_UFI;
	default:
		return -1;
	}
}

/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	int ufi_type = be_get_ufi_type(adapter, fhdr);

	switch (ufi_type) {
	case SH_P2_UFI:
		return skyhawk_chip(adapter);
	case SH_UFI:
		return (skyhawk_chip(adapter) &&
			adapter->asic_rev < ASIC_REV_P2);
	case BE3R_UFI:
		return BE3_chip(adapter);
	case BE3_UFI:
		return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
	case BE2_UFI:
		return BE2_chip(adapter);
	default:
		return false;
	}
}
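
/*
 * Compatibility matrix implemented above:
 *
 *	UFI type	accepted by
 *	BE2_UFI		BE2 only
 *	BE3_UFI		BE3 with asic-rev < B0 only
 *	BE3R_UFI	any BE3
 *	SH_UFI		Skyhawk with asic-rev < P2 only
 *	SH_P2_UFI	any Skyhawk
 */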

static int be_fw_download(struct be_adapter *adapter,
			  const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					  GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
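
/*
 * For reference, a setlink request of this shape is what iproute2
 * generates for "bridge link set dev <pf-netdev> hwmode {veb|vepa}"
 * (command form per recent iproute2; assumed here, not exercised by the
 * driver itself). The nested IFLA_BRIDGE_MODE attribute parsed above
 * carries BRIDGE_MODE_VEB or BRIDGE_MODE_VEPA.
 */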

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other
 * (non-VxLAN) tunnels are configured while VxLAN offloads are enabled,
 * offloads for those other tunnels are unexported on the fly through
 * ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
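
/*
 * Port refcount example (illustrative): add 4789 -> offloads on, count 1;
 * add 8472 -> offloads disabled, count 2; del 8472 -> count 1 (offloads
 * stay off, since be_disable_vxlan_offloads() zeroed vxlan_port); del
 * 4789 -> count 0. The next add then re-enables offloads.
 */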

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
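
/*
 * The final length check above pins down exactly this layout between the
 * outer transport header and the inner MAC header:
 *
 *	| outer UDP (8 bytes) | VxLAN header (8 bytes) | inner Ethernet ...
 *
 * i.e. sizeof(struct udphdr) + sizeof(struct vxlanhdr) == 16 bytes of
 * encapsulation; anything else is not plain UDP/VxLAN and loses the
 * checksum/GSO offloads.
 */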
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
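
/*
 * Resulting BAR usage (derived from the code above):
 *
 *	function		CSR	doorbell	PCICFG
 *	BE2 PF			BAR 2	BAR 4		BAR 1
 *	BE3 PF			BAR 2	BAR 4		BAR 0
 *	Skyhawk PF		-	BAR 4		BAR 0
 *	BEx/Skyhawk VF		-	BAR 0		db + SRIOV_VF_PCICFG_OFFSET
 *	Lancer (PF or VF)	-	BAR 0		-
 */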

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
			      SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
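
/*
 * Mailbox alignment note: the mailbox is kept on a 16-byte boundary (an
 * apparent FW requirement), hence the extra 16 bytes in the allocation
 * and the PTR_ALIGN() of both the CPU and DMA addresses; the original
 * unaligned block is what gets freed in be_ctrl_cleanup().
 */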

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}
5013}
5014
3bc6b06c 5015static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
5016{
5017 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 5018
6b7c5b94
SP
5019 if (!adapter)
5020 return;
5021
045508a8 5022 be_roce_dev_remove(adapter);
8cef7a78 5023 be_intr_set(adapter, false);
045508a8 5024
f67ef7ba
PR
5025 cancel_delayed_work_sync(&adapter->func_recovery_work);
5026
6b7c5b94
SP
5027 unregister_netdev(adapter->netdev);
5028
5fb379ee
SP
5029 be_clear(adapter);
5030
bf99e50d
PR
5031 /* tell fw we're done with firing cmds */
5032 be_cmd_fw_clean(adapter);
5033
6b7c5b94
SP
5034 be_stats_cleanup(adapter);
5035
5036 be_ctrl_cleanup(adapter);
5037
d6b6d987
SP
5038 pci_disable_pcie_error_reporting(pdev);
5039
6b7c5b94
SP
5040 pci_release_regions(pdev);
5041 pci_disable_device(pdev);
5042
5043 free_netdev(adapter->netdev);
5044}
5045
39f1d94d 5046static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 5047{
baaa08d1 5048 int status, level;
6b7c5b94 5049
9e1453c5
AK
5050 status = be_cmd_get_cntl_attributes(adapter);
5051 if (status)
5052 return status;
5053
7aeb2156
PR
5054 /* Must be a power of 2 or else MODULO will BUG_ON */
5055 adapter->be_get_temp_freq = 64;
5056
baaa08d1
VV
5057 if (BEx_chip(adapter)) {
5058 level = be_cmd_get_fw_log_level(adapter);
5059 adapter->msg_enable =
5060 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
5061 }
941a77d5 5062
92bf14ab 5063 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
2243e2e9 5064 return 0;
6b7c5b94
SP
5065}
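
/*
 * On the power-of-2 note above: assuming MODULO() is implemented as a
 * mask with a BUG_ON sanity check, as the comment implies, a sketch of
 * the arithmetic with be_get_temp_freq = 64 is
 *
 *	work_counter % 64 == (work_counter & (64 - 1))
 *
 * which holds only for power-of-2 divisors; any other value would trip
 * the BUG_ON in the fast-path modulo.
 */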

static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
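
/*
 * be_func_recovery_task() is the usual self-arming delayed-work loop:
 * the handler re-queues itself once per second for as long as recovery
 * is still worth retrying. A minimal sketch of the shape (hypothetical
 * names, not this driver's):
 *
 *	static void my_task(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx =
 *			container_of(work, struct my_ctx, dwork.work);
 *
 *		if (my_check(ctx))
 *			schedule_delayed_work(&ctx->dwork,
 *					      msecs_to_jiffies(1000));
 *	}
 *
 * The loop is stopped with cancel_delayed_work_sync() from the remove,
 * suspend and shutdown paths, as done elsewhere in this file.
 */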

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
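
/*
 * On the local_bh_disable()/local_bh_enable() pair in be_worker():
 * once the interface is up, MCC completions are reaped from softirq
 * (NAPI) context, so when the worker must reap them itself it disables
 * bottom halves first, keeping be_process_mcc()'s locking context
 * consistent with the interrupt-driven path. (This reading is inferred
 * from the calling contexts in this file, not a documented contract.)
 */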

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
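
/*
 * Note the unwind order in be_probe()'s error path: the goto ladder
 * tears down in exact reverse order of acquisition (setup -> stats ->
 * ctrl -> netdev -> regions -> device), so each label frees only what
 * was successfully set up before the failing step. This is the
 * standard probe() error-handling idiom.
 */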

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
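
/*
 * The EEH core invokes these callbacks in sequence: error_detected()
 * quiesces the function and asks for a reset, slot_reset() re-inits
 * PCI state and waits for FW readiness after the platform resets the
 * slot, and resume() rebuilds the function and restarts traffic. The
 * 30-second sleep in error_detected() gives an in-progress FW flash
 * debug dump time to complete before the reset, per the comment there.
 */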

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
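
/*
 * The rx_frag_size check above accepts exactly the power-of-2 fragment
 * sizes the RX rings support. An equivalent, more compact form would be
 * (a sketch, not the driver's code):
 *
 *	if (!is_power_of_2(rx_frag_size) ||
 *	    rx_frag_size < 2048 || rx_frag_size > 8192)
 *		rx_frag_size = 2048;
 *
 * with is_power_of_2() from <linux/log2.h>.
 */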

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);