/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

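/* Illustration (not part of the driver): how accumulate_16bit_val() extends
 * a 16-bit HW counter into a 32-bit accumulator. Assume *acc == 0x0001FFF0
 * (lo == 0xFFF0) and the HW counter has wrapped around to val == 0x0005:
 *   wrapped = 0x0005 < 0xFFF0      -> true
 *   newacc  = 0x00010000 + 0x0005  -> 0x00010005
 *   newacc += 65536                -> 0x00020005
 * The accumulator advances by 0x15 (21) events, exactly the distance the
 * 16-bit counter travelled through its wrap.
 */
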
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

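/* Illustration (not part of the driver): WRB count for a typical skb with
 * a non-empty linear area and two page fragments:
 *   1 (header WRB) + 1 (skb_headlen() != 0) + 2 (nr_frags) = 4 WRBs.
 * A fully linear skb with no frags needs just 2 WRBs (header + data).
 */
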
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

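/* Illustration (not part of the driver): wrb_fill() splits a 64-bit DMA
 * address across two little-endian 32-bit fields. For
 * addr == 0x000000123456789A:
 *   frag_pa_hi = cpu_to_le32(0x00000012);  (upper_32_bits(addr))
 *   frag_pa_lo = cpu_to_le32(0x3456789A);  (lower_32_bits(addr))
 * unmap_tx_frag() later in this file reassembles it as (hi << 32) | lo.
 */
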
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

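/* Illustration (not part of the driver): the priority remap in
 * be_get_tx_vlan_tag(). With VLAN_PRIO_SHIFT == 13, a TCI of 0xA005
 * carries priority (0xA005 & VLAN_PRIO_MASK) >> 13 == 5. If bit 5 is
 * clear in adapter->vlan_prio_bmap, the priority bits are replaced with
 * adapter->recommended_prio while the VID bits (0x005) are preserved.
 */
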
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

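/* Illustration (not part of the driver): the error unwind above. Suppose a
 * packet failed after two fragments were mapped (copied == len0 + len1).
 * Starting from the saved head, the loop skips the header WRB, unmaps
 * frag0 (dma_unmap_single, since the linear area was mapped with
 * dma_map_single) and then frag1 (dma_unmap_page), decrementing 'copied'
 * by each frag_len until it reaches 0. Resetting txq->head to 'head'
 * returns the producer index to where it was before the enqueue began.
 */
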
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

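/* Illustration (not part of the driver): ring layout produced by
 * be_xmit_enqueue() for an skb with a linear area and one page frag
 * (skb_wrb_cnt() == 3), assuming the ring head started at index H:
 *
 *   index H   : header WRB (filled last, by be_tx_setup_wrb_hdr())
 *   index H+1 : WRB for the linear area (dma_map_single)
 *   index H+2 : WRB for the page frag   (skb_frag_dma_map)
 *
 * The header slot is reserved first but written only after all fragment
 * mappings succeed, so the dma_err unwind never leaves a valid header WRB
 * pointing at unmapped buffers.
 */
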
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* F/W workaround: setting skip_hw_vlan = 1 informs the F/W
		 * to skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
					VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

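/* Illustration (not part of the driver): the odd-WRB padding above. On
 * non-Lancer chips the doorbell must be rung with an even number of WRBs.
 * If the last packet left pend_wrb_cnt == 3, one all-zero dummy WRB is
 * appended, pend_wrb_cnt becomes 4, and the num_wrb field in the last
 * header WRB is rewritten from 3 to 4 so the completion accounting still
 * matches the ring contents.
 */
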
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	struct be_queue_info *txq = &txo->q;
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	if ((atomic_read(&txq->used) + BE_MAX_TX_FRAG_COUNT) >= txq->len) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

2632bafd
SP
1577static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1578 ulong now)
6b7c5b94 1579{
2632bafd
SP
1580 aic->rx_pkts_prev = rx_pkts;
1581 aic->tx_reqs_prev = tx_pkts;
1582 aic->jiffies = now;
1583}
ac124ff9 1584
2632bafd
SP
1585static void be_eqd_update(struct be_adapter *adapter)
1586{
1587 struct be_set_eqd set_eqd[MAX_EVT_QS];
1588 int eqd, i, num = 0, start;
1589 struct be_aic_obj *aic;
1590 struct be_eq_obj *eqo;
1591 struct be_rx_obj *rxo;
1592 struct be_tx_obj *txo;
1593 u64 rx_pkts, tx_pkts;
1594 ulong now;
1595 u32 pps, delta;
10ef9ab4 1596
2632bafd
SP
1597 for_all_evt_queues(adapter, eqo, i) {
1598 aic = &adapter->aic_obj[eqo->idx];
1599 if (!aic->enable) {
1600 if (aic->jiffies)
1601 aic->jiffies = 0;
1602 eqd = aic->et_eqd;
1603 goto modify_eqd;
1604 }
6b7c5b94 1605
2632bafd
SP
1606 rxo = &adapter->rx_obj[eqo->idx];
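 /* u64_stats retry loop: re-read until a consistent 64-bit snapshot of
 * the RX counters is seen; needed on 32-bit hosts where the counters
 * cannot be read atomically.
 */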
1607 do {
57a7744e 1608 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2632bafd 1609 rx_pkts = rxo->stats.rx_pkts;
57a7744e 1610 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
10ef9ab4 1611
2632bafd
SP
1612 txo = &adapter->tx_obj[eqo->idx];
1613 do {
57a7744e 1614 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2632bafd 1615 tx_pkts = txo->stats.tx_reqs;
57a7744e 1616 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
6b7c5b94 1617
2632bafd
SP
1618 /* Skip if jiffies wrapped around or on the first calculation */
1619 now = jiffies;
1620 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1621 rx_pkts < aic->rx_pkts_prev ||
1622 tx_pkts < aic->tx_reqs_prev) {
1623 be_aic_update(aic, rx_pkts, tx_pkts, now);
1624 continue;
1625 }
1626
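 /* Compute the combined RX+TX packet rate (pkts/sec) over the elapsed
 * window, then map it to an EQ delay; the constants below appear tuned
 * so that eqd grows by 4 for roughly every 15K pps.
 */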
1627 delta = jiffies_to_msecs(now - aic->jiffies);
1628 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1629 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1630 eqd = (pps / 15000) << 2;
10ef9ab4 1631
2632bafd
SP
1632 if (eqd < 8)
1633 eqd = 0;
1634 eqd = min_t(u32, eqd, aic->max_eqd);
1635 eqd = max_t(u32, eqd, aic->min_eqd);
1636
1637 be_aic_update(aic, rx_pkts, tx_pkts, now);
10ef9ab4 1638modify_eqd:
2632bafd
SP
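 /* Batch the delay updates: queue one entry per EQ whose delay changed
 * and issue a single FW command at the end. The 65/100 scaling
 * presumably converts the delay into the multiplier unit the FW expects.
 */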
1639 if (eqd != aic->prev_eqd) {
1640 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1641 set_eqd[num].eq_id = eqo->q.id;
1642 aic->prev_eqd = eqd;
1643 num++;
1644 }
ac124ff9 1645 }
2632bafd
SP
1646
1647 if (num)
1648 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1649}
1650
3abcdeda 1651static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1652 struct be_rx_compl_info *rxcp)
4097f663 1653{
ac124ff9 1654 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1655
ab1594e9 1656 u64_stats_update_begin(&stats->sync);
3abcdeda 1657 stats->rx_compl++;
2e588f84 1658 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1659 stats->rx_pkts++;
2e588f84 1660 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1661 stats->rx_mcast_pkts++;
2e588f84 1662 if (rxcp->err)
ac124ff9 1663 stats->rx_compl_err++;
ab1594e9 1664 u64_stats_update_end(&stats->sync);
4097f663
SP
1665}
1666
2e588f84 1667static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1668{
19fad86f 1669 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1670 * Also ignore ipcksm for ipv6 pkts
1671 */
2e588f84 1672 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1673 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1674}
1675
0b0ef1d0 1676static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1677{
10ef9ab4 1678 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1679 struct be_rx_page_info *rx_page_info;
3abcdeda 1680 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1681 u16 frag_idx = rxq->tail;
6b7c5b94 1682
3abcdeda 1683 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1684 BUG_ON(!rx_page_info->page);
1685
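 /* The big page was DMA-mapped once for all its fragments: unmap it only
 * when its last fragment is consumed; for earlier fragments just sync
 * the fragment's bytes back to the CPU.
 */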
e50287be 1686 if (rx_page_info->last_frag) {
2b7bcebf
IV
1687 dma_unmap_page(&adapter->pdev->dev,
1688 dma_unmap_addr(rx_page_info, bus),
1689 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
1690 rx_page_info->last_frag = false;
1691 } else {
1692 dma_sync_single_for_cpu(&adapter->pdev->dev,
1693 dma_unmap_addr(rx_page_info, bus),
1694 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1695 }
6b7c5b94 1696
0b0ef1d0 1697 queue_tail_inc(rxq);
6b7c5b94
SP
1698 atomic_dec(&rxq->used);
1699 return rx_page_info;
1700}
1701
1702/* Throw away the data in the Rx completion */
10ef9ab4
SP
1703static void be_rx_compl_discard(struct be_rx_obj *rxo,
1704 struct be_rx_compl_info *rxcp)
6b7c5b94 1705{
6b7c5b94 1706 struct be_rx_page_info *page_info;
2e588f84 1707 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 1708
e80d9da6 1709 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 1710 page_info = get_rx_page_info(rxo);
e80d9da6
PR
1711 put_page(page_info->page);
1712 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
1713 }
1714}
1715
1716/*
1717 * skb_fill_rx_data forms a complete skb for an ether frame
1718 * indicated by rxcp.
1719 */
10ef9ab4
SP
1720static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1721 struct be_rx_compl_info *rxcp)
6b7c5b94 1722{
6b7c5b94 1723 struct be_rx_page_info *page_info;
2e588f84
SP
1724 u16 i, j;
1725 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 1726 u8 *start;
6b7c5b94 1727
0b0ef1d0 1728 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1729 start = page_address(page_info->page) + page_info->page_offset;
1730 prefetch(start);
1731
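 /* Tiny frames are copied entirely into the skb linear area below; for
 * larger frames only the Ethernet header is copied and the rest of the
 * data is attached as page fragments to avoid copying.
 */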
1732 /* Copy data in the first descriptor of this completion */
2e588f84 1733 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 1734
6b7c5b94
SP
1735 skb->len = curr_frag_len;
1736 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 1737 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
1738 /* Complete packet has now been moved to data */
1739 put_page(page_info->page);
1740 skb->data_len = 0;
1741 skb->tail += curr_frag_len;
1742 } else {
ac1ae5f3
ED
1743 hdr_len = ETH_HLEN;
1744 memcpy(skb->data, start, hdr_len);
6b7c5b94 1745 skb_shinfo(skb)->nr_frags = 1;
b061b39e 1746 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
1747 skb_shinfo(skb)->frags[0].page_offset =
1748 page_info->page_offset + hdr_len;
748b539a
SP
1749 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1750 curr_frag_len - hdr_len);
6b7c5b94 1751 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 1752 skb->truesize += rx_frag_size;
6b7c5b94
SP
1753 skb->tail += hdr_len;
1754 }
205859a2 1755 page_info->page = NULL;
6b7c5b94 1756
2e588f84
SP
1757 if (rxcp->pkt_size <= rx_frag_size) {
1758 BUG_ON(rxcp->num_rcvd != 1);
1759 return;
6b7c5b94
SP
1760 }
1761
1762 /* More frags present for this completion */
2e588f84
SP
1763 remaining = rxcp->pkt_size - curr_frag_len;
1764 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1765 page_info = get_rx_page_info(rxo);
2e588f84 1766 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 1767
bd46cb6c
AK
1768 /* Coalesce all frags from the same physical page in one slot */
1769 if (page_info->page_offset == 0) {
1770 /* Fresh page */
1771 j++;
b061b39e 1772 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
1773 skb_shinfo(skb)->frags[j].page_offset =
1774 page_info->page_offset;
9e903e08 1775 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1776 skb_shinfo(skb)->nr_frags++;
1777 } else {
1778 put_page(page_info->page);
1779 }
1780
9e903e08 1781 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
1782 skb->len += curr_frag_len;
1783 skb->data_len += curr_frag_len;
bdb28a97 1784 skb->truesize += rx_frag_size;
2e588f84 1785 remaining -= curr_frag_len;
205859a2 1786 page_info->page = NULL;
6b7c5b94 1787 }
bd46cb6c 1788 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
1789}
1790
5be93b9a 1791/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 1792static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 1793 struct be_rx_compl_info *rxcp)
6b7c5b94 1794{
10ef9ab4 1795 struct be_adapter *adapter = rxo->adapter;
6332c8d3 1796 struct net_device *netdev = adapter->netdev;
6b7c5b94 1797 struct sk_buff *skb;
89420424 1798
bb349bb4 1799 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 1800 if (unlikely(!skb)) {
ac124ff9 1801 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 1802 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
1803 return;
1804 }
1805
10ef9ab4 1806 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 1807
6332c8d3 1808 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 1809 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
1810 else
1811 skb_checksum_none_assert(skb);
6b7c5b94 1812
6332c8d3 1813 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 1814 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 1815 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 1816 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1817
b6c0e89d 1818 skb->csum_level = rxcp->tunneled;
6384a4d0 1819 skb_mark_napi_id(skb, napi);
6b7c5b94 1820
343e43c0 1821 if (rxcp->vlanf)
86a9bad3 1822 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
1823
1824 netif_receive_skb(skb);
6b7c5b94
SP
1825}
1826
5be93b9a 1827/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
1828static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1829 struct napi_struct *napi,
1830 struct be_rx_compl_info *rxcp)
6b7c5b94 1831{
10ef9ab4 1832 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1833 struct be_rx_page_info *page_info;
5be93b9a 1834 struct sk_buff *skb = NULL;
2e588f84
SP
1835 u16 remaining, curr_frag_len;
1836 u16 i, j;
3968fa1e 1837
10ef9ab4 1838 skb = napi_get_frags(napi);
5be93b9a 1839 if (!skb) {
10ef9ab4 1840 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
1841 return;
1842 }
1843
2e588f84
SP
1844 remaining = rxcp->pkt_size;
1845 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 1846 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
1847
1848 curr_frag_len = min(remaining, rx_frag_size);
1849
bd46cb6c
AK
1850 /* Coalesce all frags from the same physical page in one slot */
1851 if (i == 0 || page_info->page_offset == 0) {
1852 /* First frag or Fresh page */
1853 j++;
b061b39e 1854 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
1855 skb_shinfo(skb)->frags[j].page_offset =
1856 page_info->page_offset;
9e903e08 1857 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
1858 } else {
1859 put_page(page_info->page);
1860 }
9e903e08 1861 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 1862 skb->truesize += rx_frag_size;
bd46cb6c 1863 remaining -= curr_frag_len;
6b7c5b94
SP
1864 memset(page_info, 0, sizeof(*page_info));
1865 }
bd46cb6c 1866 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 1867
5be93b9a 1868 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
1869 skb->len = rxcp->pkt_size;
1870 skb->data_len = rxcp->pkt_size;
5be93b9a 1871 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 1872 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 1873 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 1874 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 1875
b6c0e89d 1876 skb->csum_level = rxcp->tunneled;
6384a4d0 1877 skb_mark_napi_id(skb, napi);
5be93b9a 1878
343e43c0 1879 if (rxcp->vlanf)
86a9bad3 1880 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 1881
10ef9ab4 1882 napi_gro_frags(napi);
2e588f84
SP
1883}
1884
10ef9ab4
SP
1885static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1886 struct be_rx_compl_info *rxcp)
2e588f84 1887{
c3c18bc1
SP
1888 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1889 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1890 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1891 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1892 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1893 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1894 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1895 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1896 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1897 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1898 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 1899 if (rxcp->vlanf) {
c3c18bc1
SP
1900 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1901 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 1902 }
c3c18bc1 1903 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 1904 rxcp->tunneled =
c3c18bc1 1905 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
1906}
1907
10ef9ab4
SP
1908static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1909 struct be_rx_compl_info *rxcp)
2e588f84 1910{
c3c18bc1
SP
1911 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1912 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1913 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1914 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1915 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1916 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1917 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1918 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1919 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1920 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1921 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 1922 if (rxcp->vlanf) {
c3c18bc1
SP
1923 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1924 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 1925 }
c3c18bc1
SP
1926 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1927 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
1928}
1929
1930static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1931{
1932 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1933 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1934 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1935
2e588f84
SP
1936 /* For checking the valid bit, it is OK to use either definition, as the
1937 * valid bit is at the same position in both v0 and v1 Rx compl */
1938 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1939 return NULL;
6b7c5b94 1940
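 /* Read barrier: ensure the rest of the completion entry is read only
 * after the valid bit has been seen set.
 */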
2e588f84
SP
1941 rmb();
1942 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1943
2e588f84 1944 if (adapter->be3_native)
10ef9ab4 1945 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1946 else
10ef9ab4 1947 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1948
e38b1706
SK
1949 if (rxcp->ip_frag)
1950 rxcp->l4_csum = 0;
1951
15d72184 1952 if (rxcp->vlanf) {
f93f160b
VV
1953 /* In QNQ modes, if qnq bit is not set, then the packet was
1954 * tagged only with the transparent outer vlan-tag and must
1955 * not be treated as a vlan packet by host
1956 */
1957 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 1958 rxcp->vlanf = 0;
6b7c5b94 1959
15d72184 1960 if (!lancer_chip(adapter))
3c709f8f 1961 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1962
939cf306 1963 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 1964 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
1965 rxcp->vlanf = 0;
1966 }
2e588f84
SP
1967
1968 /* As the compl has been parsed, reset it; we won't touch it again */
1969 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1970
3abcdeda 1971 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1972 return rxcp;
1973}
1974
1829b086 1975static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1976{
6b7c5b94 1977 u32 order = get_order(size);
1829b086 1978
6b7c5b94 1979 if (order > 0)
1829b086
ED
1980 gfp |= __GFP_COMP;
1981 return alloc_pages(gfp, order);
6b7c5b94
SP
1982}
1983
1984/*
1985 * Allocate a page, split it to fragments of size rx_frag_size and post as
1986 * receive buffers to BE
1987 */
c30d7266 1988static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 1989{
3abcdeda 1990 struct be_adapter *adapter = rxo->adapter;
26d92f92 1991 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1992 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 1993 struct page *pagep = NULL;
ba42fad0 1994 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
1995 struct be_eth_rx_d *rxd;
1996 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 1997 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 1998
3abcdeda 1999 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2000 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2001 if (!pagep) {
1829b086 2002 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2003 if (unlikely(!pagep)) {
ac124ff9 2004 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2005 break;
2006 }
ba42fad0
IV
2007 page_dmaaddr = dma_map_page(dev, pagep, 0,
2008 adapter->big_page_size,
2b7bcebf 2009 DMA_FROM_DEVICE);
ba42fad0
IV
2010 if (dma_mapping_error(dev, page_dmaaddr)) {
2011 put_page(pagep);
2012 pagep = NULL;
d3de1540 2013 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2014 break;
2015 }
e50287be 2016 page_offset = 0;
6b7c5b94
SP
2017 } else {
2018 get_page(pagep);
e50287be 2019 page_offset += rx_frag_size;
6b7c5b94 2020 }
e50287be 2021 page_info->page_offset = page_offset;
6b7c5b94 2022 page_info->page = pagep;
6b7c5b94
SP
2023
2024 rxd = queue_head_node(rxq);
e50287be 2025 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2026 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2027 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2028
2029 /* Any space left in the current big page for another frag? */
2030 if ((page_offset + rx_frag_size + rx_frag_size) >
2031 adapter->big_page_size) {
2032 pagep = NULL;
e50287be
SP
2033 page_info->last_frag = true;
2034 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2035 } else {
2036 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2037 }
26d92f92
SP
2038
2039 prev_page_info = page_info;
2040 queue_head_inc(rxq);
10ef9ab4 2041 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2042 }
e50287be
SP
2043
2044 /* Mark the last frag of a page when we break out of the above loop
2045 * with no more slots available in the RXQ
2046 */
2047 if (pagep) {
2048 prev_page_info->last_frag = true;
2049 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2050 }
6b7c5b94
SP
2051
2052 if (posted) {
6b7c5b94 2053 atomic_add(posted, &rxq->used);
6384a4d0
SP
2054 if (rxo->rx_post_starved)
2055 rxo->rx_post_starved = false;
c30d7266
AK
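 /* Ring the RXQ doorbell in chunks of at most 256 entries, presumably
 * the limit of the doorbell's num-posted field.
 */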
2056 do {
2057 notify = min(256u, posted);
2058 be_rxq_notify(adapter, rxq->id, notify);
2059 posted -= notify;
2060 } while (posted);
ea1dae11
SP
2061 } else if (atomic_read(&rxq->used) == 0) {
2062 /* Let be_worker replenish when memory is available */
3abcdeda 2063 rxo->rx_post_starved = true;
6b7c5b94 2064 }
6b7c5b94
SP
2065}
2066
5fb379ee 2067static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 2068{
6b7c5b94
SP
2069 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
2070
2071 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2072 return NULL;
2073
f3eb62d2 2074 rmb();
6b7c5b94
SP
2075 be_dws_le_to_cpu(txcp, sizeof(*txcp));
2076
2077 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2078
2079 queue_tail_inc(tx_cq);
2080 return txcp;
2081}
2082
3c8def97 2083static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2084 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2085{
5f07b3c5 2086 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2087 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2088 u16 frag_index, num_wrbs = 0;
2089 struct sk_buff *skb = NULL;
2090 bool unmap_skb_hdr = false;
a73b796e 2091 struct be_eth_wrb *wrb;
6b7c5b94 2092
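 /* Each TX request occupies a header WRB followed by one WRB per
 * fragment; the completion carries the index of the request's last WRB,
 * so walk from the queue tail up to that index, unmapping each fragment
 * on the way.
 */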
ec43b1a6 2093 do {
5f07b3c5
SP
2094 if (sent_skbs[txq->tail]) {
2095 /* Free skb from prev req */
2096 if (skb)
2097 dev_consume_skb_any(skb);
2098 skb = sent_skbs[txq->tail];
2099 sent_skbs[txq->tail] = NULL;
2100 queue_tail_inc(txq); /* skip hdr wrb */
2101 num_wrbs++;
2102 unmap_skb_hdr = true;
2103 }
a73b796e 2104 wrb = queue_tail_node(txq);
5f07b3c5 2105 frag_index = txq->tail;
2b7bcebf 2106 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2107 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2108 unmap_skb_hdr = false;
6b7c5b94 2109 queue_tail_inc(txq);
5f07b3c5
SP
2110 num_wrbs++;
2111 } while (frag_index != last_index);
2112 dev_consume_skb_any(skb);
6b7c5b94 2113
4d586b82 2114 return num_wrbs;
6b7c5b94
SP
2115}
2116
10ef9ab4
SP
2117/* Return the number of events in the event queue */
2118static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2119{
10ef9ab4
SP
2120 struct be_eq_entry *eqe;
2121 int num = 0;
859b1e4e 2122
10ef9ab4
SP
2123 do {
2124 eqe = queue_tail_node(&eqo->q);
2125 if (eqe->evt == 0)
2126 break;
859b1e4e 2127
10ef9ab4
SP
2128 rmb();
2129 eqe->evt = 0;
2130 num++;
2131 queue_tail_inc(&eqo->q);
2132 } while (true);
2133
2134 return num;
859b1e4e
SP
2135}
2136
10ef9ab4
SP
2137/* Leaves the EQ in disarmed state */
2138static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2139{
10ef9ab4 2140 int num = events_get(eqo);
859b1e4e 2141
10ef9ab4 2142 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
2143}
2144
10ef9ab4 2145static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
2146{
2147 struct be_rx_page_info *page_info;
3abcdeda
SP
2148 struct be_queue_info *rxq = &rxo->q;
2149 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2150 struct be_rx_compl_info *rxcp;
d23e946c
SP
2151 struct be_adapter *adapter = rxo->adapter;
2152 int flush_wait = 0;
6b7c5b94 2153
d23e946c
SP
2154 /* Consume pending rx completions.
2155 * Wait for the flush completion (identified by zero num_rcvd)
2156 * to arrive. Notify CQ even when there are no more CQ entries
2157 * for HW to flush partially coalesced CQ entries.
2158 * In Lancer, there is no need to wait for flush compl.
2159 */
2160 for (;;) {
2161 rxcp = be_rx_compl_get(rxo);
ddf1169f 2162 if (!rxcp) {
d23e946c
SP
2163 if (lancer_chip(adapter))
2164 break;
2165
2166 if (flush_wait++ > 10 || be_hw_error(adapter)) {
2167 dev_warn(&adapter->pdev->dev,
2168 "did not receive flush compl\n");
2169 break;
2170 }
2171 be_cq_notify(adapter, rx_cq->id, true, 0);
2172 mdelay(1);
2173 } else {
2174 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2175 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2176 if (rxcp->num_rcvd == 0)
2177 break;
2178 }
6b7c5b94
SP
2179 }
2180
d23e946c
SP
2181 /* After cleanup, leave the CQ in unarmed state */
2182 be_cq_notify(adapter, rx_cq->id, false, 0);
2183
2184 /* Then free posted rx buffers that were not used */
0b0ef1d0
SR
2185 while (atomic_read(&rxq->used) > 0) {
2186 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2187 put_page(page_info->page);
2188 memset(page_info, 0, sizeof(*page_info));
2189 }
2190 BUG_ON(atomic_read(&rxq->used));
5f820b6c
KA
2191 rxq->tail = 0;
2192 rxq->head = 0;
6b7c5b94
SP
2193}
2194
0ae57bb3 2195static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2196{
5f07b3c5
SP
2197 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2198 struct device *dev = &adapter->pdev->dev;
0ae57bb3
SP
2199 struct be_tx_obj *txo;
2200 struct be_queue_info *txq;
a8e9179a 2201 struct be_eth_tx_compl *txcp;
0ae57bb3 2202 int i, pending_txqs;
a8e9179a 2203
1a3d0717 2204 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2205 do {
0ae57bb3
SP
2206 pending_txqs = adapter->num_tx_qs;
2207
2208 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2209 cmpl = 0;
2210 num_wrbs = 0;
0ae57bb3
SP
2211 txq = &txo->q;
2212 while ((txcp = be_tx_compl_get(&txo->cq))) {
c3c18bc1 2213 end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
0ae57bb3
SP
2214 num_wrbs += be_tx_compl_process(adapter, txo,
2215 end_idx);
2216 cmpl++;
2217 }
2218 if (cmpl) {
2219 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2220 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2221 timeo = 0;
0ae57bb3 2222 }
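 /* The queue counts as drained once only the WRBs that were never
 * notified to HW (pend_wrb_cnt) remain outstanding.
 */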
5f07b3c5 2223 if (atomic_read(&txq->used) == txo->pend_wrb_cnt)
0ae57bb3 2224 pending_txqs--;
a8e9179a
SP
2225 }
2226
1a3d0717 2227 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
a8e9179a
SP
2228 break;
2229
2230 mdelay(1);
2231 } while (true);
2232
5f07b3c5 2233 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2234 for_all_tx_queues(adapter, txo, i) {
2235 txq = &txo->q;
0ae57bb3 2236
5f07b3c5
SP
2237 if (atomic_read(&txq->used)) {
2238 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2239 i, atomic_read(&txq->used));
2240 notified_idx = txq->tail;
0ae57bb3 2241 end_idx = txq->tail;
5f07b3c5
SP
2242 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2243 txq->len);
2244 /* Use the tx-compl process logic to handle requests
2245 * that were not sent to the HW.
2246 */
0ae57bb3
SP
2247 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2248 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2249 BUG_ON(atomic_read(&txq->used));
2250 txo->pend_wrb_cnt = 0;
2251 /* Since hw was never notified of these requests,
2252 * reset TXQ indices
2253 */
2254 txq->head = notified_idx;
2255 txq->tail = notified_idx;
0ae57bb3 2256 }
b03388d6 2257 }
6b7c5b94
SP
2258}
2259
10ef9ab4
SP
2260static void be_evt_queues_destroy(struct be_adapter *adapter)
2261{
2262 struct be_eq_obj *eqo;
2263 int i;
2264
2265 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2266 if (eqo->q.created) {
2267 be_eq_clean(eqo);
10ef9ab4 2268 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2269 napi_hash_del(&eqo->napi);
68d7bdcb 2270 netif_napi_del(&eqo->napi);
19d59aa7 2271 }
10ef9ab4
SP
2272 be_queue_free(adapter, &eqo->q);
2273 }
2274}
2275
2276static int be_evt_queues_create(struct be_adapter *adapter)
2277{
2278 struct be_queue_info *eq;
2279 struct be_eq_obj *eqo;
2632bafd 2280 struct be_aic_obj *aic;
10ef9ab4
SP
2281 int i, rc;
2282
92bf14ab
SP
2283 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2284 adapter->cfg_num_qs);
10ef9ab4
SP
2285
2286 for_all_evt_queues(adapter, eqo, i) {
68d7bdcb
SP
2287 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2288 BE_NAPI_WEIGHT);
6384a4d0 2289 napi_hash_add(&eqo->napi);
2632bafd 2290 aic = &adapter->aic_obj[i];
10ef9ab4 2291 eqo->adapter = adapter;
10ef9ab4 2292 eqo->idx = i;
2632bafd
SP
2293 aic->max_eqd = BE_MAX_EQD;
2294 aic->enable = true;
10ef9ab4
SP
2295
2296 eq = &eqo->q;
2297 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2298 sizeof(struct be_eq_entry));
10ef9ab4
SP
2299 if (rc)
2300 return rc;
2301
f2f781a7 2302 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2303 if (rc)
2304 return rc;
2305 }
1cfafab9 2306 return 0;
10ef9ab4
SP
2307}
2308
5fb379ee
SP
2309static void be_mcc_queues_destroy(struct be_adapter *adapter)
2310{
2311 struct be_queue_info *q;
5fb379ee 2312
8788fdc2 2313 q = &adapter->mcc_obj.q;
5fb379ee 2314 if (q->created)
8788fdc2 2315 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2316 be_queue_free(adapter, q);
2317
8788fdc2 2318 q = &adapter->mcc_obj.cq;
5fb379ee 2319 if (q->created)
8788fdc2 2320 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2321 be_queue_free(adapter, q);
2322}
2323
2324/* Must be called only after TX qs are created as MCC shares TX EQ */
2325static int be_mcc_queues_create(struct be_adapter *adapter)
2326{
2327 struct be_queue_info *q, *cq;
5fb379ee 2328
8788fdc2 2329 cq = &adapter->mcc_obj.cq;
5fb379ee 2330 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2331 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2332 goto err;
2333
10ef9ab4
SP
2334 /* Use the default EQ for MCC completions */
2335 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2336 goto mcc_cq_free;
2337
8788fdc2 2338 q = &adapter->mcc_obj.q;
5fb379ee
SP
2339 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2340 goto mcc_cq_destroy;
2341
8788fdc2 2342 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2343 goto mcc_q_free;
2344
2345 return 0;
2346
2347mcc_q_free:
2348 be_queue_free(adapter, q);
2349mcc_cq_destroy:
8788fdc2 2350 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2351mcc_cq_free:
2352 be_queue_free(adapter, cq);
2353err:
2354 return -1;
2355}
2356
6b7c5b94
SP
2357static void be_tx_queues_destroy(struct be_adapter *adapter)
2358{
2359 struct be_queue_info *q;
3c8def97
SP
2360 struct be_tx_obj *txo;
2361 u8 i;
6b7c5b94 2362
3c8def97
SP
2363 for_all_tx_queues(adapter, txo, i) {
2364 q = &txo->q;
2365 if (q->created)
2366 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2367 be_queue_free(adapter, q);
6b7c5b94 2368
3c8def97
SP
2369 q = &txo->cq;
2370 if (q->created)
2371 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2372 be_queue_free(adapter, q);
2373 }
6b7c5b94
SP
2374}
2375
7707133c 2376static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2377{
10ef9ab4 2378 struct be_queue_info *cq, *eq;
3c8def97 2379 struct be_tx_obj *txo;
92bf14ab 2380 int status, i;
6b7c5b94 2381
92bf14ab 2382 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2383
10ef9ab4
SP
2384 for_all_tx_queues(adapter, txo, i) {
2385 cq = &txo->cq;
2386 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2387 sizeof(struct be_eth_tx_compl));
2388 if (status)
2389 return status;
3c8def97 2390
827da44c
JS
2391 u64_stats_init(&txo->stats.sync);
2392 u64_stats_init(&txo->stats.sync_compl);
2393
10ef9ab4
SP
2394 /* If num_evt_qs is less than num_tx_qs, then more than
2395 * one txq shares an eq
2396 */
2397 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2398 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2399 if (status)
2400 return status;
6b7c5b94 2401
10ef9ab4
SP
2402 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2403 sizeof(struct be_eth_wrb));
2404 if (status)
2405 return status;
6b7c5b94 2406
94d73aaa 2407 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2408 if (status)
2409 return status;
3c8def97 2410 }
6b7c5b94 2411
d379142b
SP
2412 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2413 adapter->num_tx_qs);
10ef9ab4 2414 return 0;
6b7c5b94
SP
2415}
2416
10ef9ab4 2417static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2418{
2419 struct be_queue_info *q;
3abcdeda
SP
2420 struct be_rx_obj *rxo;
2421 int i;
2422
2423 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2424 q = &rxo->cq;
2425 if (q->created)
2426 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2427 be_queue_free(adapter, q);
ac6a0c4a
SP
2428 }
2429}
2430
10ef9ab4 2431static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2432{
10ef9ab4 2433 struct be_queue_info *eq, *cq;
3abcdeda
SP
2434 struct be_rx_obj *rxo;
2435 int rc, i;
6b7c5b94 2436
92bf14ab
SP
2437 /* We can create as many RSS rings as there are EQs. */
2438 adapter->num_rx_qs = adapter->num_evt_qs;
2439
2440 /* We'll use RSS only if at least 2 RSS rings are supported.
2441 * When RSS is used, we'll need a default RXQ for non-IP traffic.
10ef9ab4 2442 */
92bf14ab
SP
2443 if (adapter->num_rx_qs > 1)
2444 adapter->num_rx_qs++;
2445
6b7c5b94 2446 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2447 for_all_rx_queues(adapter, rxo, i) {
2448 rxo->adapter = adapter;
3abcdeda
SP
2449 cq = &rxo->cq;
2450 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2451 sizeof(struct be_eth_rx_compl));
3abcdeda 2452 if (rc)
10ef9ab4 2453 return rc;
3abcdeda 2454
827da44c 2455 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2456 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2457 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2458 if (rc)
10ef9ab4 2459 return rc;
3abcdeda 2460 }
6b7c5b94 2461
d379142b
SP
2462 dev_info(&adapter->pdev->dev,
2463 "created %d RSS queue(s) and 1 default RX queue\n",
2464 adapter->num_rx_qs - 1);
10ef9ab4 2465 return 0;
b628bde2
SP
2466}
2467
6b7c5b94
SP
2468static irqreturn_t be_intx(int irq, void *dev)
2469{
e49cc34f
SP
2470 struct be_eq_obj *eqo = dev;
2471 struct be_adapter *adapter = eqo->adapter;
2472 int num_evts = 0;
6b7c5b94 2473
d0b9cec3
SP
2474 /* IRQ is not expected when NAPI is scheduled as the EQ
2475 * will not be armed.
2476 * But, this can happen on Lancer INTx where it takes
2477 * a while to de-assert INTx or in BE2 where occasionally
2478 * an interrupt may be raised even when EQ is unarmed.
2479 * If NAPI is already scheduled, then counting & notifying
2480 * events will orphan them.
e49cc34f 2481 */
d0b9cec3 2482 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2483 num_evts = events_get(eqo);
d0b9cec3
SP
2484 __napi_schedule(&eqo->napi);
2485 if (num_evts)
2486 eqo->spurious_intr = 0;
2487 }
2488 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2489
d0b9cec3
SP
2490 /* Return IRQ_HANDLED only for the first spurious intr
2491 * after a valid intr to stop the kernel from branding
2492 * this irq as a bad one!
e49cc34f 2493 */
d0b9cec3
SP
2494 if (num_evts || eqo->spurious_intr++ == 0)
2495 return IRQ_HANDLED;
2496 else
2497 return IRQ_NONE;
6b7c5b94
SP
2498}
2499
10ef9ab4 2500static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2501{
10ef9ab4 2502 struct be_eq_obj *eqo = dev;
6b7c5b94 2503
0b545a62
SP
2504 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2505 napi_schedule(&eqo->napi);
6b7c5b94
SP
2506 return IRQ_HANDLED;
2507}
2508
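/* GRO is attempted only for error-free TCP frames whose L4 checksum was
 * verified by HW.
 */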
2e588f84 2509static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2510{
e38b1706 2511 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2512}
2513
10ef9ab4 2514static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2515 int budget, int polling)
6b7c5b94 2516{
3abcdeda
SP
2517 struct be_adapter *adapter = rxo->adapter;
2518 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2519 struct be_rx_compl_info *rxcp;
6b7c5b94 2520 u32 work_done;
c30d7266 2521 u32 frags_consumed = 0;
6b7c5b94
SP
2522
2523 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2524 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2525 if (!rxcp)
2526 break;
2527
12004ae9
SP
2528 /* Is it a flush compl that has no data */
2529 if (unlikely(rxcp->num_rcvd == 0))
2530 goto loop_continue;
2531
2532 /* Discard compl with partial DMA Lancer B0 */
2533 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2534 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2535 goto loop_continue;
2536 }
2537
2538 /* On BE, drop pkts that arrive due to imperfect filtering in
2539 * promiscuous mode on some SKUs
2540 */
2541 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2542 !lancer_chip(adapter))) {
10ef9ab4 2543 be_rx_compl_discard(rxo, rxcp);
12004ae9 2544 goto loop_continue;
64642811 2545 }
009dd872 2546
6384a4d0
SP
2547 /* Don't do gro when we're busy_polling */
2548 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2549 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2550 else
6384a4d0
SP
2551 be_rx_compl_process(rxo, napi, rxcp);
2552
12004ae9 2553loop_continue:
c30d7266 2554 frags_consumed += rxcp->num_rcvd;
2e588f84 2555 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2556 }
2557
10ef9ab4
SP
2558 if (work_done) {
2559 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2560
6384a4d0
SP
2561 /* When an rx-obj gets into post_starved state, just
2562 * let be_worker do the posting.
2563 */
2564 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2565 !rxo->rx_post_starved)
c30d7266
AK
2566 be_post_rx_frags(rxo, GFP_ATOMIC,
2567 max_t(u32, MAX_RX_POST,
2568 frags_consumed));
6b7c5b94 2569 }
10ef9ab4 2570
6b7c5b94
SP
2571 return work_done;
2572}
2573
512bb8a2
KA
2574static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2575{
2576 switch (status) {
2577 case BE_TX_COMP_HDR_PARSE_ERR:
2578 tx_stats(txo)->tx_hdr_parse_err++;
2579 break;
2580 case BE_TX_COMP_NDMA_ERR:
2581 tx_stats(txo)->tx_dma_err++;
2582 break;
2583 case BE_TX_COMP_ACL_ERR:
2584 tx_stats(txo)->tx_spoof_check_err++;
2585 break;
2586 }
2587}
2588
2589static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2590{
2591 switch (status) {
2592 case LANCER_TX_COMP_LSO_ERR:
2593 tx_stats(txo)->tx_tso_err++;
2594 break;
2595 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2596 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2597 tx_stats(txo)->tx_spoof_check_err++;
2598 break;
2599 case LANCER_TX_COMP_QINQ_ERR:
2600 tx_stats(txo)->tx_qinq_err++;
2601 break;
2602 case LANCER_TX_COMP_PARITY_ERR:
2603 tx_stats(txo)->tx_internal_parity_err++;
2604 break;
2605 case LANCER_TX_COMP_DMA_ERR:
2606 tx_stats(txo)->tx_dma_err++;
2607 break;
2608 }
2609}
2610
c8f64615
SP
2611static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2612 int idx)
6b7c5b94 2613{
6b7c5b94 2614 struct be_eth_tx_compl *txcp;
c8f64615 2615 int num_wrbs = 0, work_done = 0;
512bb8a2 2616 u32 compl_status;
c8f64615
SP
2617 u16 last_idx;
2618
2619 while ((txcp = be_tx_compl_get(&txo->cq))) {
2620 last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
2621 num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
2622 work_done++;
3c8def97 2623
512bb8a2
KA
2624 compl_status = GET_TX_COMPL_BITS(status, txcp);
2625 if (compl_status) {
2626 if (lancer_chip(adapter))
2627 lancer_update_tx_err(txo, compl_status);
2628 else
2629 be_update_tx_err(txo, compl_status);
2630 }
10ef9ab4 2631 }
6b7c5b94 2632
10ef9ab4
SP
2633 if (work_done) {
2634 be_cq_notify(adapter, txo->cq.id, true, work_done);
2635 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2636
10ef9ab4
SP
2637 /* As Tx wrbs have been freed up, wake up netdev queue
2638 * if it was stopped due to lack of tx wrbs. */
2639 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
748b539a 2640 atomic_read(&txo->q.used) < txo->q.len / 2) {
10ef9ab4 2641 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2642 }
10ef9ab4
SP
2643
2644 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2645 tx_stats(txo)->tx_compl += work_done;
2646 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2647 }
10ef9ab4 2648}
6b7c5b94 2649
f7062ee5
SP
2650#ifdef CONFIG_NET_RX_BUSY_POLL
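/* NAPI and busy-poll contend for an EQ via eqo->lock and eqo->state:
 * whichever path takes the lock first marks the EQ BE_EQ_NAPI or
 * BE_EQ_POLL; the loser records a *_YIELD flag and backs off instead of
 * processing the same queues concurrently.
 */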
2651static inline bool be_lock_napi(struct be_eq_obj *eqo)
2652{
2653 bool status = true;
2654
2655 spin_lock(&eqo->lock); /* BH is already disabled */
2656 if (eqo->state & BE_EQ_LOCKED) {
2657 WARN_ON(eqo->state & BE_EQ_NAPI);
2658 eqo->state |= BE_EQ_NAPI_YIELD;
2659 status = false;
2660 } else {
2661 eqo->state = BE_EQ_NAPI;
2662 }
2663 spin_unlock(&eqo->lock);
2664 return status;
2665}
2666
2667static inline void be_unlock_napi(struct be_eq_obj *eqo)
2668{
2669 spin_lock(&eqo->lock); /* BH is already disabled */
2670
2671 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
2672 eqo->state = BE_EQ_IDLE;
2673
2674 spin_unlock(&eqo->lock);
2675}
2676
2677static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2678{
2679 bool status = true;
2680
2681 spin_lock_bh(&eqo->lock);
2682 if (eqo->state & BE_EQ_LOCKED) {
2683 eqo->state |= BE_EQ_POLL_YIELD;
2684 status = false;
2685 } else {
2686 eqo->state |= BE_EQ_POLL;
2687 }
2688 spin_unlock_bh(&eqo->lock);
2689 return status;
2690}
2691
2692static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2693{
2694 spin_lock_bh(&eqo->lock);
2695
2696 WARN_ON(eqo->state & (BE_EQ_NAPI));
2697 eqo->state = BE_EQ_IDLE;
2698
2699 spin_unlock_bh(&eqo->lock);
2700}
2701
2702static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2703{
2704 spin_lock_init(&eqo->lock);
2705 eqo->state = BE_EQ_IDLE;
2706}
2707
2708static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2709{
2710 local_bh_disable();
2711
2712 /* It's enough to just acquire the napi lock on the eqo to stop
2713 * be_busy_poll() from processing any queues.
2714 */
2715 while (!be_lock_napi(eqo))
2716 mdelay(1);
2717
2718 local_bh_enable();
2719}
2720
2721#else /* CONFIG_NET_RX_BUSY_POLL */
2722
2723static inline bool be_lock_napi(struct be_eq_obj *eqo)
2724{
2725 return true;
2726}
2727
2728static inline void be_unlock_napi(struct be_eq_obj *eqo)
2729{
2730}
2731
2732static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
2733{
2734 return false;
2735}
2736
2737static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
2738{
2739}
2740
2741static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
2742{
2743}
2744
2745static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
2746{
2747}
2748#endif /* CONFIG_NET_RX_BUSY_POLL */
2749
68d7bdcb 2750int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
2751{
2752 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2753 struct be_adapter *adapter = eqo->adapter;
0b545a62 2754 int max_work = 0, work, i, num_evts;
6384a4d0 2755 struct be_rx_obj *rxo;
a4906ea0 2756 struct be_tx_obj *txo;
f31e50a8 2757
0b545a62
SP
2758 num_evts = events_get(eqo);
2759
a4906ea0
SP
2760 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2761 be_process_tx(adapter, txo, i);
f31e50a8 2762
6384a4d0
SP
2763 if (be_lock_napi(eqo)) {
2764 /* This loop will iterate twice for EQ0 in which
2765 * completions of the last RXQ (default one) are also processed
2766 * For other EQs the loop iterates only once
2767 */
2768 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2769 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2770 max_work = max(work, max_work);
2771 }
2772 be_unlock_napi(eqo);
2773 } else {
2774 max_work = budget;
10ef9ab4 2775 }
6b7c5b94 2776
10ef9ab4
SP
2777 if (is_mcc_eqo(eqo))
2778 be_process_mcc(adapter);
93c86700 2779
10ef9ab4
SP
2780 if (max_work < budget) {
2781 napi_complete(napi);
0b545a62 2782 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2783 } else {
2784 /* As we'll continue in polling mode, count and clear events */
0b545a62 2785 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2786 }
10ef9ab4 2787 return max_work;
6b7c5b94
SP
2788}
2789
6384a4d0
SP
2790#ifdef CONFIG_NET_RX_BUSY_POLL
2791static int be_busy_poll(struct napi_struct *napi)
2792{
2793 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2794 struct be_adapter *adapter = eqo->adapter;
2795 struct be_rx_obj *rxo;
2796 int i, work = 0;
2797
2798 if (!be_lock_busy_poll(eqo))
2799 return LL_FLUSH_BUSY;
2800
2801 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2802 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2803 if (work)
2804 break;
2805 }
2806
2807 be_unlock_busy_poll(eqo);
2808 return work;
2809}
2810#endif
2811
f67ef7ba 2812void be_detect_error(struct be_adapter *adapter)
7c185276 2813{
e1cfb67a
PR
2814 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2815 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 2816 u32 i;
eb0eecc1
SK
2817 bool error_detected = false;
2818 struct device *dev = &adapter->pdev->dev;
2819 struct net_device *netdev = adapter->netdev;
7c185276 2820
d23e946c 2821 if (be_hw_error(adapter))
72f02485
SP
2822 return;
2823
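 /* Lancer reports errors through SLIPORT registers in BAR space, while
 * BE2/BE3/Skyhawk expose UE (unrecoverable error) status bits in PCI
 * config space; decode whichever applies.
 */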
e1cfb67a
PR
2824 if (lancer_chip(adapter)) {
2825 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2826 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2827 sliport_err1 = ioread32(adapter->db +
748b539a 2828 SLIPORT_ERROR1_OFFSET);
e1cfb67a 2829 sliport_err2 = ioread32(adapter->db +
748b539a 2830 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
2831 adapter->hw_error = true;
2832 /* Do not log error messages if it's a FW reset */
2833 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2834 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2835 dev_info(dev, "Firmware update in progress\n");
2836 } else {
2837 error_detected = true;
2838 dev_err(dev, "Error detected in the card\n");
2839 dev_err(dev, "ERR: sliport status 0x%x\n",
2840 sliport_status);
2841 dev_err(dev, "ERR: sliport error1 0x%x\n",
2842 sliport_err1);
2843 dev_err(dev, "ERR: sliport error2 0x%x\n",
2844 sliport_err2);
2845 }
e1cfb67a
PR
2846 }
2847 } else {
2848 pci_read_config_dword(adapter->pdev,
748b539a 2849 PCICFG_UE_STATUS_LOW, &ue_lo);
e1cfb67a 2850 pci_read_config_dword(adapter->pdev,
748b539a 2851 PCICFG_UE_STATUS_HIGH, &ue_hi);
e1cfb67a 2852 pci_read_config_dword(adapter->pdev,
748b539a 2853 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
e1cfb67a 2854 pci_read_config_dword(adapter->pdev,
748b539a 2855 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
e1cfb67a 2856
f67ef7ba
PR
2857 ue_lo = (ue_lo & ~ue_lo_mask);
2858 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 2859
eb0eecc1
SK
2860 /* On certain platforms BE hardware can indicate spurious UEs.
2861 * Allow HW to stop working completely in case of a real UE.
2862 * Hence not setting the hw_error for UE detection.
2863 */
f67ef7ba 2864
eb0eecc1
SK
2865 if (ue_lo || ue_hi) {
2866 error_detected = true;
2867 dev_err(dev,
2868 "Unrecoverable Error detected in the adapter");
2869 dev_err(dev, "Please reboot server to recover");
2870 if (skyhawk_chip(adapter))
2871 adapter->hw_error = true;
2872 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2873 if (ue_lo & 1)
2874 dev_err(dev, "UE: %s bit set\n",
2875 ue_status_low_desc[i]);
2876 }
2877 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2878 if (ue_hi & 1)
2879 dev_err(dev, "UE: %s bit set\n",
2880 ue_status_hi_desc[i]);
2881 }
7c185276
AK
2882 }
2883 }
eb0eecc1
SK
2884 if (error_detected)
2885 netif_carrier_off(netdev);
7c185276
AK
2886}
2887
8d56ff11
SP
2888static void be_msix_disable(struct be_adapter *adapter)
2889{
ac6a0c4a 2890 if (msix_enabled(adapter)) {
8d56ff11 2891 pci_disable_msix(adapter->pdev);
ac6a0c4a 2892 adapter->num_msix_vec = 0;
68d7bdcb 2893 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
2894 }
2895}
2896
c2bba3df 2897static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2898{
7dc4c064 2899 int i, num_vec;
d379142b 2900 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2901
92bf14ab
SP
2902 /* If RoCE is supported, program the max number of NIC vectors that
2903 * may be configured via set-channels, along with vectors needed for
2904 * RoCE. Else, just program the number we'll use initially.
2905 */
2906 if (be_roce_supported(adapter))
2907 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2908 2 * num_online_cpus());
2909 else
2910 num_vec = adapter->cfg_num_qs;
3abcdeda 2911
ac6a0c4a 2912 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2913 adapter->msix_entries[i].entry = i;
2914
7dc4c064
AG
2915 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2916 MIN_MSIX_VECTORS, num_vec);
2917 if (num_vec < 0)
2918 goto fail;
92bf14ab 2919
92bf14ab
SP
2920 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2921 adapter->num_msix_roce_vec = num_vec / 2;
2922 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2923 adapter->num_msix_roce_vec);
2924 }
2925
2926 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2927
2928 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2929 adapter->num_msix_vec);
c2bba3df 2930 return 0;
7dc4c064
AG
2931
2932fail:
2933 dev_warn(dev, "MSIx enable failed\n");
2934
2935 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2936 if (!be_physfn(adapter))
2937 return num_vec;
2938 return 0;
6b7c5b94
SP
2939}
2940
fe6d2a38 2941static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 2942 struct be_eq_obj *eqo)
b628bde2 2943{
f2f781a7 2944 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 2945}
6b7c5b94 2946
b628bde2
SP
2947static int be_msix_register(struct be_adapter *adapter)
2948{
10ef9ab4
SP
2949 struct net_device *netdev = adapter->netdev;
2950 struct be_eq_obj *eqo;
2951 int status, i, vec;
6b7c5b94 2952
10ef9ab4
SP
2953 for_all_evt_queues(adapter, eqo, i) {
2954 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2955 vec = be_msix_vec_get(adapter, eqo);
2956 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2957 if (status)
2958 goto err_msix;
2959 }
b628bde2 2960
6b7c5b94 2961 return 0;
3abcdeda 2962err_msix:
10ef9ab4
SP
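 /* Unwind: free the IRQs registered so far, in reverse order */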
2963 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2964 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2965 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 2966 status);
ac6a0c4a 2967 be_msix_disable(adapter);
6b7c5b94
SP
2968 return status;
2969}
2970
2971static int be_irq_register(struct be_adapter *adapter)
2972{
2973 struct net_device *netdev = adapter->netdev;
2974 int status;
2975
ac6a0c4a 2976 if (msix_enabled(adapter)) {
6b7c5b94
SP
2977 status = be_msix_register(adapter);
2978 if (status == 0)
2979 goto done;
ba343c77
SB
2980 /* INTx is not supported for VF */
2981 if (!be_physfn(adapter))
2982 return status;
6b7c5b94
SP
2983 }
2984
e49cc34f 2985 /* INTx: only the first EQ is used */
6b7c5b94
SP
2986 netdev->irq = adapter->pdev->irq;
2987 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2988 &adapter->eq_obj[0]);
6b7c5b94
SP
2989 if (status) {
2990 dev_err(&adapter->pdev->dev,
2991 "INTx request IRQ failed - err %d\n", status);
2992 return status;
2993 }
2994done:
2995 adapter->isr_registered = true;
2996 return 0;
2997}
2998
2999static void be_irq_unregister(struct be_adapter *adapter)
3000{
3001 struct net_device *netdev = adapter->netdev;
10ef9ab4 3002 struct be_eq_obj *eqo;
3abcdeda 3003 int i;
6b7c5b94
SP
3004
3005 if (!adapter->isr_registered)
3006 return;
3007
3008 /* INTx */
ac6a0c4a 3009 if (!msix_enabled(adapter)) {
e49cc34f 3010 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3011 goto done;
3012 }
3013
3014 /* MSIx */
10ef9ab4
SP
3015 for_all_evt_queues(adapter, eqo, i)
3016 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 3017
6b7c5b94
SP
3018done:
3019 adapter->isr_registered = false;
6b7c5b94
SP
3020}
3021
10ef9ab4 3022static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3023{
3024 struct be_queue_info *q;
3025 struct be_rx_obj *rxo;
3026 int i;
3027
3028 for_all_rx_queues(adapter, rxo, i) {
3029 q = &rxo->q;
3030 if (q->created) {
3031 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3032 be_rx_cq_clean(rxo);
482c9e79 3033 }
10ef9ab4 3034 be_queue_free(adapter, q);
482c9e79
SP
3035 }
3036}
3037
889cd4b2
SP
3038static int be_close(struct net_device *netdev)
3039{
3040 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3041 struct be_eq_obj *eqo;
3042 int i;
889cd4b2 3043
e1ad8e33
KA
3044 /* This protection is needed as be_close() may be called even when the
3045 * adapter is in cleared state (after eeh perm failure)
3046 */
3047 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3048 return 0;
3049
045508a8
PP
3050 be_roce_dev_close(adapter);
3051
dff345c5
IV
3052 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3053 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3054 napi_disable(&eqo->napi);
6384a4d0
SP
3055 be_disable_busy_poll(eqo);
3056 }
71237b6f 3057 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3058 }
a323d9bf
SP
3059
3060 be_async_mcc_disable(adapter);
3061
3062 /* Wait for all pending tx completions to arrive so that
3063 * all tx skbs are freed.
3064 */
fba87559 3065 netif_tx_disable(netdev);
6e1f9975 3066 be_tx_compl_clean(adapter);
a323d9bf
SP
3067
3068 be_rx_qs_destroy(adapter);
f66b7cfd 3069 be_clear_uc_list(adapter);
d11a347d 3070
a323d9bf 3071 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3072 if (msix_enabled(adapter))
3073 synchronize_irq(be_msix_vec_get(adapter, eqo));
3074 else
3075 synchronize_irq(netdev->irq);
3076 be_eq_clean(eqo);
63fcb27f
PR
3077 }
3078
889cd4b2
SP
3079 be_irq_unregister(adapter);
3080
482c9e79
SP
3081 return 0;
3082}
3083
10ef9ab4 3084static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3085{
1dcf7b1c
ED
3086 struct rss_info *rss = &adapter->rss_info;
3087 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3088 struct be_rx_obj *rxo;
e9008ee9 3089 int rc, i, j;
482c9e79
SP
3090
3091 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3092 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3093 sizeof(struct be_eth_rx_d));
3094 if (rc)
3095 return rc;
3096 }
3097
3098 /* The FW would like the default RXQ to be created first */
3099 rxo = default_rxo(adapter);
3100 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
3101 adapter->if_handle, false, &rxo->rss_id);
3102 if (rc)
3103 return rc;
3104
3105 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3106 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3107 rx_frag_size, adapter->if_handle,
3108 true, &rxo->rss_id);
482c9e79
SP
3109 if (rc)
3110 return rc;
3111 }
3112
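 /* Fill the RSS indirection table by striping the RSS ring ids
 * round-robin across its RSS_INDIR_TABLE_LEN entries, so hash buckets
 * map evenly onto the available rings.
 */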
3113 if (be_multi_rxq(adapter)) {
e2557877
VD
3114 for (j = 0; j < RSS_INDIR_TABLE_LEN;
3115 j += adapter->num_rx_qs - 1) {
e9008ee9 3116 for_all_rss_queues(adapter, rxo, i) {
e2557877 3117 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3118 break;
e2557877
VD
3119 rss->rsstable[j + i] = rxo->rss_id;
3120 rss->rss_queue[j + i] = i;
e9008ee9
PR
3121 }
3122 }
e2557877
VD
3123 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3124 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3125
3126 if (!BEx_chip(adapter))
e2557877
VD
3127 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3128 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3129 } else {
3130 /* Disable RSS, if only default RX Q is created */
e2557877 3131 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3132 }
594ad54a 3133
1dcf7b1c 3134 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3135 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3136 128, rss_key);
da1388d6 3137 if (rc) {
e2557877 3138 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3139 return rc;
482c9e79
SP
3140 }
3141
1dcf7b1c 3142 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3143
482c9e79 3144 /* First time posting */
10ef9ab4 3145 for_all_rx_queues(adapter, rxo, i)
c30d7266 3146 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
889cd4b2
SP
3147 return 0;
3148}
3149
6b7c5b94
SP
3150static int be_open(struct net_device *netdev)
3151{
3152 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3153 struct be_eq_obj *eqo;
3abcdeda 3154 struct be_rx_obj *rxo;
10ef9ab4 3155 struct be_tx_obj *txo;
b236916a 3156 u8 link_status;
3abcdeda 3157 int status, i;
5fb379ee 3158
10ef9ab4 3159 status = be_rx_qs_create(adapter);
482c9e79
SP
3160 if (status)
3161 goto err;
3162
c2bba3df
SK
3163 status = be_irq_register(adapter);
3164 if (status)
3165 goto err;
5fb379ee 3166
10ef9ab4 3167 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3168 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3169
10ef9ab4
SP
3170 for_all_tx_queues(adapter, txo, i)
3171 be_cq_notify(adapter, txo->cq.id, true, 0);
3172
7a1e9b20
SP
3173 be_async_mcc_enable(adapter);
3174
10ef9ab4
SP
3175 for_all_evt_queues(adapter, eqo, i) {
3176 napi_enable(&eqo->napi);
6384a4d0 3177 be_enable_busy_poll(eqo);
4cad9f3b 3178 be_eq_notify(adapter, eqo->q.id, true, true, 0);
10ef9ab4 3179 }
04d3d624 3180 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3181
323ff71e 3182 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3183 if (!status)
3184 be_link_status_update(adapter, link_status);
3185
fba87559 3186 netif_tx_start_all_queues(netdev);
045508a8 3187 be_roce_dev_open(adapter);
c9c47142 3188
c5abe7c0 3189#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3190 if (skyhawk_chip(adapter))
3191 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3192#endif
3193
889cd4b2
SP
3194 return 0;
3195err:
3196 be_close(adapter->netdev);
3197 return -EIO;
5fb379ee
SP
3198}
3199
71d8d1b5
AK
3200static int be_setup_wol(struct be_adapter *adapter, bool enable)
3201{
3202 struct be_dma_mem cmd;
3203 int status = 0;
3204 u8 mac[ETH_ALEN];
3205
3206 memset(mac, 0, ETH_ALEN);
3207
3208 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3209 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3210 GFP_KERNEL);
ddf1169f 3211 if (!cmd.va)
6b568689 3212 return -ENOMEM;
71d8d1b5
AK
3213
3214 if (enable) {
3215 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3216 PCICFG_PM_CONTROL_OFFSET,
3217 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3218 if (status) {
3219 dev_err(&adapter->pdev->dev,
2381a55c 3220 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3221 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3222 cmd.dma);
71d8d1b5
AK
3223 return status;
3224 }
3225 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3226 adapter->netdev->dev_addr,
3227 &cmd);
71d8d1b5
AK
3228 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3229 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3230 } else {
3231 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3232 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3233 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3234 }
3235
2b7bcebf 3236 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3237 return status;
3238}
3239
f7062ee5
SP
3240static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3241{
3242 u32 addr;
3243
3244 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3245
3246 mac[5] = (u8)(addr & 0xFF);
3247 mac[4] = (u8)((addr >> 8) & 0xFF);
3248 mac[3] = (u8)((addr >> 16) & 0xFF);
3249 /* Use the OUI from the current MAC address */
3250 memcpy(mac, adapter->netdev->dev_addr, 3);
3251}
3252
6d87f5c3
AK
3253/*
3254 * Generate a seed MAC address from the PF MAC Address using jhash.
3255 * MAC addresses for VFs are assigned incrementally starting from the seed.
3256 * These addresses are programmed in the ASIC by the PF and the VF driver
3257 * queries for the MAC address during its probe.
3258 */
4c876616 3259static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3260{
f9449ab7 3261 u32 vf;
3abcdeda 3262 int status = 0;
6d87f5c3 3263 u8 mac[ETH_ALEN];
11ac75ed 3264 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3265
3266 be_vf_eth_addr_generate(adapter, mac);
3267
11ac75ed 3268 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3269 if (BEx_chip(adapter))
590c391d 3270 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3271 vf_cfg->if_handle,
3272 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3273 else
3274 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3275 vf + 1);
590c391d 3276
6d87f5c3
AK
3277 if (status)
3278 dev_err(&adapter->pdev->dev,
748b539a
SP
3279 "Mac address assignment failed for VF %d\n",
3280 vf);
6d87f5c3 3281 else
11ac75ed 3282 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3283
3284 mac[5] += 1;
3285 }
3286 return status;
3287}
3288
4c876616
SP
3289static int be_vfs_mac_query(struct be_adapter *adapter)
3290{
3291 int status, vf;
3292 u8 mac[ETH_ALEN];
3293 struct be_vf_cfg *vf_cfg;
4c876616
SP
3294
3295 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3296 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3297 mac, vf_cfg->if_handle,
3298 false, vf + 1);
4c876616
SP
3299 if (status)
3300 return status;
3301 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3302 }
3303 return 0;
3304}
3305
f9449ab7 3306static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3307{
11ac75ed 3308 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3309 u32 vf;
3310
257a3feb 3311 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3312 dev_warn(&adapter->pdev->dev,
3313 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3314 goto done;
3315 }
3316
b4c1df93
SP
3317 pci_disable_sriov(adapter->pdev);
3318
11ac75ed 3319 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3320 if (BEx_chip(adapter))
11ac75ed
SP
3321 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3322 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3323 else
3324 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3325 vf + 1);
f9449ab7 3326
11ac75ed
SP
3327 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3328 }
39f1d94d
SP
3329done:
3330 kfree(adapter->vf_cfg);
3331 adapter->num_vfs = 0;
f174c7ec 3332 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3333}
3334
7707133c
SP
3335static void be_clear_queues(struct be_adapter *adapter)
3336{
3337 be_mcc_queues_destroy(adapter);
3338 be_rx_cqs_destroy(adapter);
3339 be_tx_queues_destroy(adapter);
3340 be_evt_queues_destroy(adapter);
3341}
3342
68d7bdcb 3343static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3344{
191eb756
SP
3345 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3346 cancel_delayed_work_sync(&adapter->work);
3347 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3348 }
68d7bdcb
SP
3349}
3350
b05004ad 3351static void be_mac_clear(struct be_adapter *adapter)
68d7bdcb 3352{
b05004ad 3353 if (adapter->pmac_id) {
f66b7cfd
SP
3354 be_cmd_pmac_del(adapter, adapter->if_handle,
3355 adapter->pmac_id[0], 0);
b05004ad
SK
3356 kfree(adapter->pmac_id);
3357 adapter->pmac_id = NULL;
3358 }
3359}
3360
c5abe7c0 3361#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3362static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3363{
630f4b70
SB
3364 struct net_device *netdev = adapter->netdev;
3365
c9c47142
SP
3366 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3367 be_cmd_manage_iface(adapter, adapter->if_handle,
3368 OP_CONVERT_TUNNEL_TO_NORMAL);
3369
3370 if (adapter->vxlan_port)
3371 be_cmd_set_vxlan_port(adapter, 0);
3372
3373 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3374 adapter->vxlan_port = 0;
630f4b70
SB
3375
3376 netdev->hw_enc_features = 0;
3377 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3378 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3379}
c5abe7c0 3380#endif
c9c47142 3381
b05004ad
SK
3382static int be_clear(struct be_adapter *adapter)
3383{
68d7bdcb 3384 be_cancel_worker(adapter);
191eb756 3385
11ac75ed 3386 if (sriov_enabled(adapter))
f9449ab7
SP
3387 be_vf_clear(adapter);
3388
bec84e6b
VV
3389 /* Re-configure FW to distribute resources evenly across max-supported
3390 * number of VFs, only when VFs are not already enabled.
3391 */
3392 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3393 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3394 pci_sriov_get_totalvfs(adapter->pdev));
3395
c5abe7c0 3396#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3397 be_disable_vxlan_offloads(adapter);
c5abe7c0 3398#endif
2d17f403 3399 /* delete the primary mac along with the uc-mac list */
b05004ad 3400 be_mac_clear(adapter);
fbc13f01 3401
f9449ab7 3402 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3403
7707133c 3404 be_clear_queues(adapter);
a54769f5 3405
10ef9ab4 3406 be_msix_disable(adapter);
e1ad8e33 3407 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3408 return 0;
3409}
3410
0700d816
KA
3411static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3412 u32 cap_flags, u32 vf)
3413{
3414 u32 en_flags;
3415 int status;
3416
3417 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3418 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3419 BE_IF_FLAGS_RSS;
3420
3421 en_flags &= cap_flags;
3422
3423 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3424 if_handle, vf);
3425
3426 return status;
3427}
3428
4c876616 3429static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3430{
92bf14ab 3431 struct be_resources res = {0};
4c876616 3432 struct be_vf_cfg *vf_cfg;
0700d816
KA
3433 u32 cap_flags, vf;
3434 int status;
abb93951 3435
0700d816 3436 /* If a FW profile exists, then cap_flags are updated */
4c876616
SP
3437 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3438 BE_IF_FLAGS_MULTICAST;
abb93951 3439
4c876616 3440 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3441 if (!BE3_chip(adapter)) {
3442 status = be_cmd_get_profile_config(adapter, &res,
3443 vf + 1);
3444 if (!status)
3445 cap_flags = res.if_cap_flags;
3446 }
4c876616 3447
0700d816
KA
3448 status = be_if_create(adapter, &vf_cfg->if_handle,
3449 cap_flags, vf + 1);
4c876616 3450 if (status)
0700d816 3451 return status;
4c876616 3452 }
0700d816
KA
3453
3454 return 0;
abb93951
PR
3455}
3456
39f1d94d 3457static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3458{
11ac75ed 3459 struct be_vf_cfg *vf_cfg;
30128031
SP
3460 int vf;
3461
39f1d94d
SP
3462 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3463 GFP_KERNEL);
3464 if (!adapter->vf_cfg)
3465 return -ENOMEM;
3466
11ac75ed
SP
3467 for_all_vfs(adapter, vf_cfg, vf) {
3468 vf_cfg->if_handle = -1;
3469 vf_cfg->pmac_id = -1;
30128031 3470 }
39f1d94d 3471 return 0;
30128031
SP
3472}
3473
f9449ab7
SP
3474static int be_vf_setup(struct be_adapter *adapter)
3475{
c502224e 3476 struct device *dev = &adapter->pdev->dev;
11ac75ed 3477 struct be_vf_cfg *vf_cfg;
4c876616 3478 int status, old_vfs, vf;
04a06028 3479 u32 privileges;
39f1d94d 3480
257a3feb 3481 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3482
3483 status = be_vf_setup_init(adapter);
3484 if (status)
3485 goto err;
30128031 3486
4c876616
SP
3487 if (old_vfs) {
3488 for_all_vfs(adapter, vf_cfg, vf) {
3489 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3490 if (status)
3491 goto err;
3492 }
f9449ab7 3493
4c876616
SP
3494 status = be_vfs_mac_query(adapter);
3495 if (status)
3496 goto err;
3497 } else {
bec84e6b
VV
3498 status = be_vfs_if_create(adapter);
3499 if (status)
3500 goto err;
3501
39f1d94d
SP
3502 status = be_vf_eth_addr_config(adapter);
3503 if (status)
3504 goto err;
3505 }
f9449ab7 3506
11ac75ed 3507 for_all_vfs(adapter, vf_cfg, vf) {
04a06028
SP
3508 /* Allow VFs to program MAC/VLAN filters */
3509 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3510 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3511 status = be_cmd_set_fn_privileges(adapter,
3512 privileges |
3513 BE_PRIV_FILTMGMT,
3514 vf + 1);
3515 if (!status)
3516 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3517 vf);
3518 }
3519
0f77ba73
RN
3520 /* Allow full available bandwidth */
3521 if (!old_vfs)
3522 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3523
bdce2ad7 3524 if (!old_vfs) {
0599863d 3525 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3526 be_cmd_set_logical_link_config(adapter,
3527 IFLA_VF_LINK_STATE_AUTO,
3528 vf + 1);
3529 }
f9449ab7 3530 }
b4c1df93
SP
3531
3532 if (!old_vfs) {
3533 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3534 if (status) {
3535 dev_err(dev, "SRIOV enable failed\n");
3536 adapter->num_vfs = 0;
3537 goto err;
3538 }
3539 }
f174c7ec
VV
3540
3541 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3542 return 0;
3543err:
4c876616
SP
3544 dev_err(dev, "VF setup failed\n");
3545 be_vf_clear(adapter);
f9449ab7
SP
3546 return status;
3547}
3548
f93f160b
VV
3549/* Converting function_mode bits on BE3 to SH mc_type enums */
3550
3551static u8 be_convert_mc_type(u32 function_mode)
3552{
66064dbc 3553 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3554 return vNIC1;
66064dbc 3555 else if (function_mode & QNQ_MODE)
f93f160b
VV
3556 return FLEX10;
3557 else if (function_mode & VNIC_MODE)
3558 return vNIC2;
3559 else if (function_mode & UMC_ENABLED)
3560 return UMC;
3561 else
3562 return MC_NONE;
3563}
3564
92bf14ab
SP
3565/* On BE2/BE3 FW does not suggest the supported limits */
3566static void BEx_get_resources(struct be_adapter *adapter,
3567 struct be_resources *res)
3568{
bec84e6b 3569 bool use_sriov = adapter->num_vfs != 0;
92bf14ab
SP
3570
3571 if (be_physfn(adapter))
3572 res->max_uc_mac = BE_UC_PMAC_COUNT;
3573 else
3574 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3575
f93f160b
VV
3576 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3577
3578 if (be_is_mc(adapter)) {
3579 /* Assuming that there are 4 channels per port
3580 * when multi-channel is enabled
3581 */
3582 if (be_is_qnq_mode(adapter))
3583 res->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3584 else
3585 /* In a non-qnq multichannel mode, the pvid
3586 * takes up one vlan entry
3587 */
3588 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3589 } else {
92bf14ab 3590 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
3591 }
3592
92bf14ab
SP
3593 res->max_mcast_mac = BE_MAX_MC;
3594
a5243dab
VV
3595 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3596 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3597 * *only* if it is RSS-capable.
3598 */
3599 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3600 !be_physfn(adapter) || (be_is_mc(adapter) &&
a28277dc 3601 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 3602 res->max_tx_qs = 1;
a28277dc
SR
3603 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3604 struct be_resources super_nic_res = {0};
3605
3606 /* On a SuperNIC profile, the driver needs to use the
3607 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3608 */
3609 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3610 /* Some old versions of BE3 FW don't report max_tx_qs value */
3611 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3612 } else {
92bf14ab 3613 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 3614 }
92bf14ab
SP
3615
3616 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3617 !use_sriov && be_physfn(adapter))
3618 res->max_rss_qs = (adapter->be3_native) ?
3619 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3620 res->max_rx_qs = res->max_rss_qs + 1;
3621
e3dc867c 3622 if (be_physfn(adapter))
d3518e21 3623 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
3624 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3625 else
3626 res->max_evt_qs = 1;
92bf14ab
SP
3627
3628 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3629 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3630 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3631}
3632
30128031
SP
3633static void be_setup_init(struct be_adapter *adapter)
3634{
3635 adapter->vlan_prio_bmap = 0xff;
42f11cf2 3636 adapter->phy.link_speed = -1;
30128031
SP
3637 adapter->if_handle = -1;
3638 adapter->be3_native = false;
f66b7cfd 3639 adapter->if_flags = 0;
f25b119c
PR
3640 if (be_physfn(adapter))
3641 adapter->cmd_privileges = MAX_PRIVILEGES;
3642 else
3643 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
3644}
3645
bec84e6b
VV
3646static int be_get_sriov_config(struct be_adapter *adapter)
3647{
3648 struct device *dev = &adapter->pdev->dev;
3649 struct be_resources res = {0};
d3d18312 3650 int max_vfs, old_vfs;
bec84e6b
VV
3651
3652 /* Some old versions of BE3 FW don't report max_vfs value */
d3d18312
SP
3653 be_cmd_get_profile_config(adapter, &res, 0);
3654
bec84e6b
VV
3655 if (BE3_chip(adapter) && !res.max_vfs) {
3656 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3657 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3658 }
3659
d3d18312 3660 adapter->pool_res = res;
bec84e6b
VV
3661
3662 if (!be_max_vfs(adapter)) {
3663 if (num_vfs)
50762667 3664 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
bec84e6b
VV
3665 adapter->num_vfs = 0;
3666 return 0;
3667 }
3668
d3d18312
SP
3669 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3670
bec84e6b
VV
3671 /* validate num_vfs module param */
3672 old_vfs = pci_num_vf(adapter->pdev);
3673 if (old_vfs) {
3674 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3675 if (old_vfs != num_vfs)
3676 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3677 adapter->num_vfs = old_vfs;
3678 } else {
3679 if (num_vfs > be_max_vfs(adapter)) {
3680 dev_info(dev, "Resources unavailable to init %d VFs\n",
3681 num_vfs);
3682 dev_info(dev, "Limiting to %d VFs\n",
3683 be_max_vfs(adapter));
3684 }
3685 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3686 }
3687
3688 return 0;
3689}
3690
92bf14ab 3691static int be_get_resources(struct be_adapter *adapter)
abb93951 3692{
92bf14ab
SP
3693 struct device *dev = &adapter->pdev->dev;
3694 struct be_resources res = {0};
3695 int status;
abb93951 3696
92bf14ab
SP
3697 if (BEx_chip(adapter)) {
3698 BEx_get_resources(adapter, &res);
3699 adapter->res = res;
abb93951
PR
3700 }
3701
92bf14ab
SP
3702 /* For Lancer, SH etc., read per-function resource limits from FW.
3703 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3704 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
3705 */
3706 if (!BEx_chip(adapter)) {
3707 status = be_cmd_get_func_config(adapter, &res);
3708 if (status)
3709 return status;
abb93951 3710
92bf14ab
SP
3711 /* If RoCE may be enabled stash away half the EQs for RoCE */
3712 if (be_roce_supported(adapter))
3713 res.max_evt_qs /= 2;
3714 adapter->res = res;
abb93951 3715 }
4c876616 3716
acbafeb1
SP
3717 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3718 be_max_txqs(adapter), be_max_rxqs(adapter),
3719 be_max_rss(adapter), be_max_eqs(adapter),
3720 be_max_vfs(adapter));
3721 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3722 be_max_uc(adapter), be_max_mc(adapter),
3723 be_max_vlans(adapter));
3724
92bf14ab 3725 return 0;
abb93951
PR
3726}
3727
d3d18312
SP
3728static void be_sriov_config(struct be_adapter *adapter)
3729{
3730 struct device *dev = &adapter->pdev->dev;
3731 int status;
3732
3733 status = be_get_sriov_config(adapter);
3734 if (status) {
3735 dev_err(dev, "Failed to query SR-IOV configuration\n");
3736 dev_err(dev, "SR-IOV cannot be enabled\n");
3737 return;
3738 }
3739
3740 /* When the HW is in an SR-IOV capable configuration, the PF-pool
3741 * resources are distributed equally across the max number of
3742 * VFs. The user may request only a subset of the max VFs to be
3743 * enabled. Based on num_vfs, redistribute the resources across
3744 * num_vfs so that each VF gets access to a larger share of
3745 * resources. This facility is not available in BE3 FW.
3746 * On Lancer, FW does this redistribution itself.
3747 */
3748 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3749 status = be_cmd_set_sriov_config(adapter,
3750 adapter->pool_res,
3751 adapter->num_vfs);
3752 if (status)
3753 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3754 }
3755}
3756
39f1d94d
SP
3757static int be_get_config(struct be_adapter *adapter)
3758{
542963b7 3759 u16 profile_id;
4c876616 3760 int status;
39f1d94d 3761
e97e3cda 3762 status = be_cmd_query_fw_cfg(adapter);
abb93951 3763 if (status)
92bf14ab 3764 return status;
abb93951 3765
21252377
VV
3766 be_cmd_query_port_name(adapter);
3767
3768 if (be_physfn(adapter)) {
542963b7
VV
3769 status = be_cmd_get_active_profile(adapter, &profile_id);
3770 if (!status)
3771 dev_info(&adapter->pdev->dev,
3772 "Using profile 0x%x\n", profile_id);
962bcb75 3773 }
bec84e6b 3774
d3d18312
SP
3775 if (!BE2_chip(adapter) && be_physfn(adapter))
3776 be_sriov_config(adapter);
542963b7 3777
92bf14ab
SP
3778 status = be_get_resources(adapter);
3779 if (status)
3780 return status;
abb93951 3781
46ee9c14
RN
3782 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3783 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
3784 if (!adapter->pmac_id)
3785 return -ENOMEM;
abb93951 3786
92bf14ab
SP
3787 /* Sanitize cfg_num_qs based on HW and platform limits */
3788 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3789
3790 return 0;
39f1d94d
SP
3791}
3792
95046b92
SP
3793static int be_mac_setup(struct be_adapter *adapter)
3794{
3795 u8 mac[ETH_ALEN];
3796 int status;
3797
3798 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3799 status = be_cmd_get_perm_mac(adapter, mac);
3800 if (status)
3801 return status;
3802
3803 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3804 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3805 } else {
3806 /* Maybe the HW was reset; dev_addr must be re-programmed */
3807 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3808 }
3809
2c7a9dc1
AK
3810 /* For BE3-R VFs, the PF programs the initial MAC address */
3811 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3812 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3813 &adapter->pmac_id[0], 0);
95046b92
SP
3814 return 0;
3815}
3816
68d7bdcb
SP
3817static void be_schedule_worker(struct be_adapter *adapter)
3818{
3819 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3820 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3821}
3822
7707133c 3823static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 3824{
68d7bdcb 3825 struct net_device *netdev = adapter->netdev;
10ef9ab4 3826 int status;
ba343c77 3827
7707133c 3828 status = be_evt_queues_create(adapter);
abb93951
PR
3829 if (status)
3830 goto err;
73d540f2 3831
7707133c 3832 status = be_tx_qs_create(adapter);
c2bba3df
SK
3833 if (status)
3834 goto err;
10ef9ab4 3835
7707133c 3836 status = be_rx_cqs_create(adapter);
10ef9ab4 3837 if (status)
a54769f5 3838 goto err;
6b7c5b94 3839
7707133c 3840 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
3841 if (status)
3842 goto err;
3843
68d7bdcb
SP
3844 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3845 if (status)
3846 goto err;
3847
3848 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3849 if (status)
3850 goto err;
3851
7707133c
SP
3852 return 0;
3853err:
3854 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3855 return status;
3856}
3857
68d7bdcb
SP
3858int be_update_queues(struct be_adapter *adapter)
3859{
3860 struct net_device *netdev = adapter->netdev;
3861 int status;
3862
3863 if (netif_running(netdev))
3864 be_close(netdev);
3865
3866 be_cancel_worker(adapter);
3867
3868 /* If any vectors have been shared with RoCE we cannot re-program
3869 * the MSIx table.
3870 */
3871 if (!adapter->num_msix_roce_vec)
3872 be_msix_disable(adapter);
3873
3874 be_clear_queues(adapter);
3875
3876 if (!msix_enabled(adapter)) {
3877 status = be_msix_enable(adapter);
3878 if (status)
3879 return status;
3880 }
3881
3882 status = be_setup_queues(adapter);
3883 if (status)
3884 return status;
3885
3886 be_schedule_worker(adapter);
3887
3888 if (netif_running(netdev))
3889 status = be_open(netdev);
3890
3891 return status;
3892}
3893
f7062ee5
SP
3894static inline int fw_major_num(const char *fw_ver)
3895{
3896 int fw_major = 0, i;
3897
3898 i = sscanf(fw_ver, "%d.", &fw_major);
3899 if (i != 1)
3900 return 0;
3901
3902 return fw_major;
3903}
3904
7707133c
SP
3905static int be_setup(struct be_adapter *adapter)
3906{
3907 struct device *dev = &adapter->pdev->dev;
7707133c
SP
3908 int status;
3909
3910 be_setup_init(adapter);
3911
3912 if (!lancer_chip(adapter))
3913 be_cmd_req_native_mode(adapter);
3914
3915 status = be_get_config(adapter);
10ef9ab4 3916 if (status)
a54769f5 3917 goto err;
6b7c5b94 3918
7707133c 3919 status = be_msix_enable(adapter);
10ef9ab4 3920 if (status)
a54769f5 3921 goto err;
6b7c5b94 3922
0700d816
KA
3923 status = be_if_create(adapter, &adapter->if_handle,
3924 be_if_cap_flags(adapter), 0);
7707133c 3925 if (status)
a54769f5 3926 goto err;
6b7c5b94 3927
68d7bdcb
SP
3928 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3929 rtnl_lock();
7707133c 3930 status = be_setup_queues(adapter);
68d7bdcb 3931 rtnl_unlock();
95046b92 3932 if (status)
1578e777
PR
3933 goto err;
3934
7707133c 3935 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
3936
3937 status = be_mac_setup(adapter);
10ef9ab4
SP
3938 if (status)
3939 goto err;
3940
e97e3cda 3941 be_cmd_get_fw_ver(adapter);
acbafeb1 3942 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 3943
e9e2a904 3944 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 3945 dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
e9e2a904
SK
3946 adapter->fw_ver);
3947 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3948 }
3949
1d1e9a46 3950 if (adapter->vlans_added)
10329df8 3951 be_vid_config(adapter);
7ab8b0b4 3952
a54769f5 3953 be_set_rx_mode(adapter->netdev);
5fb379ee 3954
76a9e08e
SR
3955 be_cmd_get_acpi_wol_cap(adapter);
3956
00d594c3
KA
3957 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
3958 adapter->rx_fc);
3959 if (status)
3960 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
3961 &adapter->rx_fc);
590c391d 3962
00d594c3
KA
3963 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
3964 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 3965
bdce2ad7
SR
3966 if (be_physfn(adapter))
3967 be_cmd_set_logical_link_config(adapter,
3968 IFLA_VF_LINK_STATE_AUTO, 0);
3969
bec84e6b
VV
3970 if (adapter->num_vfs)
3971 be_vf_setup(adapter);
f9449ab7 3972
f25b119c
PR
3973 status = be_cmd_get_phy_info(adapter);
3974 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3975 adapter->phy.fc_autoneg = 1;
3976
68d7bdcb 3977 be_schedule_worker(adapter);
e1ad8e33 3978 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 3979 return 0;
a54769f5
SP
3980err:
3981 be_clear(adapter);
3982 return status;
3983}
6b7c5b94 3984
66268739
IV
3985#ifdef CONFIG_NET_POLL_CONTROLLER
3986static void be_netpoll(struct net_device *netdev)
3987{
3988 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3989 struct be_eq_obj *eqo;
66268739
IV
3990 int i;
3991
e49cc34f
SP
3992 for_all_evt_queues(adapter, eqo, i) {
3993 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3994 napi_schedule(&eqo->napi);
3995 }
66268739
IV
3996}
3997#endif
3998
96c9b2e4 3999static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4000
306f1348
SP
4001static bool phy_flashing_required(struct be_adapter *adapter)
4002{
e02cfd96 4003 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4004 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4005}
4006
c165541e
PR
4007static bool is_comp_in_ufi(struct be_adapter *adapter,
4008 struct flash_section_info *fsec, int type)
4009{
4010 int i = 0, img_type = 0;
4011 struct flash_section_info_g2 *fsec_g2 = NULL;
4012
ca34fe38 4013 if (BE2_chip(adapter))
c165541e
PR
4014 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4015
4016 for (i = 0; i < MAX_FLASH_COMP; i++) {
4017 if (fsec_g2)
4018 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4019 else
4020 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4021
4022 if (img_type == type)
4023 return true;
4024 }
4025 return false;
4026
4027}
4028
4188e7df 4029static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4030 int header_size,
4031 const struct firmware *fw)
c165541e
PR
4032{
4033 struct flash_section_info *fsec = NULL;
4034 const u8 *p = fw->data;
4035
4036 p += header_size;
4037 while (p < (fw->data + fw->size)) {
4038 fsec = (struct flash_section_info *)p;
4039 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4040 return fsec;
4041 p += 32;
4042 }
4043 return NULL;
4044}
4045
96c9b2e4
VV
4046static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4047 u32 img_offset, u32 img_size, int hdr_size,
4048 u16 img_optype, bool *crc_match)
4049{
4050 u32 crc_offset;
4051 int status;
4052 u8 crc[4];
4053
70a7b525
VV
4054 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4055 img_size - 4);
96c9b2e4
VV
4056 if (status)
4057 return status;
4058
4059 crc_offset = hdr_size + img_offset + img_size - 4;
4060
4061 /* Skip flashing if the CRC of the flashed region matches */
4062 if (!memcmp(crc, p + crc_offset, 4))
4063 *crc_match = true;
4064 else
4065 *crc_match = false;
4066
4067 return status;
4068}
4069
773a2d7c 4070static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4071 struct be_dma_mem *flash_cmd, int optype, int img_size,
4072 u32 img_offset)
773a2d7c 4073{
70a7b525 4074 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4075 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4076 int status;
773a2d7c 4077
773a2d7c
PR
4078 while (total_bytes) {
4079 num_bytes = min_t(u32, 32 * 1024, total_bytes);
4080
4081 total_bytes -= num_bytes;
4082
4083 if (!total_bytes) {
4084 if (optype == OPTYPE_PHY_FW)
4085 flash_op = FLASHROM_OPER_PHY_FLASH;
4086 else
4087 flash_op = FLASHROM_OPER_FLASH;
4088 } else {
4089 if (optype == OPTYPE_PHY_FW)
4090 flash_op = FLASHROM_OPER_PHY_SAVE;
4091 else
4092 flash_op = FLASHROM_OPER_SAVE;
4093 }
4094
be716446 4095 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4096 img += num_bytes;
4097 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4098 flash_op, img_offset +
4099 bytes_sent, num_bytes);
4c60005f 4100 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4101 optype == OPTYPE_PHY_FW)
4102 break;
4103 else if (status)
773a2d7c 4104 return status;
70a7b525
VV
4105
4106 bytes_sent += num_bytes;
773a2d7c
PR
4107 }
4108 return 0;
4109}
4110
0ad3157e 4111/* For BE2, BE3 and BE3-R */
ca34fe38 4112static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4113 const struct firmware *fw,
4114 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4115{
c165541e 4116 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4117 struct device *dev = &adapter->pdev->dev;
c165541e 4118 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4119 int status, i, filehdr_size, num_comp;
4120 const struct flash_comp *pflashcomp;
4121 bool crc_match;
4122 const u8 *p;
c165541e
PR
4123
4124 struct flash_comp gen3_flash_types[] = {
4125 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4126 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4127 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4128 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4129 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4130 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4131 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4132 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4133 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4134 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4135 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4136 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4137 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4138 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4139 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4140 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4141 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4142 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4143 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4144 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4145 };
c165541e
PR
4146
4147 struct flash_comp gen2_flash_types[] = {
4148 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4149 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4150 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4151 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4152 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4153 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4154 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4155 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4156 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4157 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4158 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4159 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4160 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4161 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4162 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4163 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4164 };
4165
ca34fe38 4166 if (BE3_chip(adapter)) {
3f0d4560
AK
4167 pflashcomp = gen3_flash_types;
4168 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4169 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4170 } else {
4171 pflashcomp = gen2_flash_types;
4172 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4173 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4174 img_hdrs_size = 0;
84517482 4175 }
ca34fe38 4176
c165541e
PR
4177 /* Get flash section info */
4178 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4179 if (!fsec) {
96c9b2e4 4180 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4181 return -1;
4182 }
9fe96934 4183 for (i = 0; i < num_comp; i++) {
c165541e 4184 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4185 continue;
c165541e
PR
4186
4187 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4188 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4189 continue;
4190
773a2d7c
PR
4191 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4192 !phy_flashing_required(adapter))
306f1348 4193 continue;
c165541e 4194
773a2d7c 4195 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4196 status = be_check_flash_crc(adapter, fw->data,
4197 pflashcomp[i].offset,
4198 pflashcomp[i].size,
4199 filehdr_size +
4200 img_hdrs_size,
4201 OPTYPE_REDBOOT, &crc_match);
4202 if (status) {
4203 dev_err(dev,
4204 "Could not get CRC for 0x%x region\n",
4205 pflashcomp[i].optype);
4206 continue;
4207 }
4208
4209 if (crc_match)
773a2d7c
PR
4210 continue;
4211 }
c165541e 4212
96c9b2e4
VV
4213 p = fw->data + filehdr_size + pflashcomp[i].offset +
4214 img_hdrs_size;
306f1348
SP
4215 if (p + pflashcomp[i].size > fw->data + fw->size)
4216 return -1;
773a2d7c
PR
4217
4218 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4219 pflashcomp[i].size, 0);
773a2d7c 4220 if (status) {
96c9b2e4 4221 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4222 pflashcomp[i].img_type);
4223 return status;
84517482 4224 }
84517482 4225 }
84517482
AK
4226 return 0;
4227}
4228
96c9b2e4
VV
4229static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4230{
4231 u32 img_type = le32_to_cpu(fsec_entry.type);
4232 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4233
4234 if (img_optype != 0xFFFF)
4235 return img_optype;
4236
4237 switch (img_type) {
4238 case IMAGE_FIRMWARE_iSCSI:
4239 img_optype = OPTYPE_ISCSI_ACTIVE;
4240 break;
4241 case IMAGE_BOOT_CODE:
4242 img_optype = OPTYPE_REDBOOT;
4243 break;
4244 case IMAGE_OPTION_ROM_ISCSI:
4245 img_optype = OPTYPE_BIOS;
4246 break;
4247 case IMAGE_OPTION_ROM_PXE:
4248 img_optype = OPTYPE_PXE_BIOS;
4249 break;
4250 case IMAGE_OPTION_ROM_FCoE:
4251 img_optype = OPTYPE_FCOE_BIOS;
4252 break;
4253 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4254 img_optype = OPTYPE_ISCSI_BACKUP;
4255 break;
4256 case IMAGE_NCSI:
4257 img_optype = OPTYPE_NCSI_FW;
4258 break;
4259 case IMAGE_FLASHISM_JUMPVECTOR:
4260 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4261 break;
4262 case IMAGE_FIRMWARE_PHY:
4263 img_optype = OPTYPE_SH_PHY_FW;
4264 break;
4265 case IMAGE_REDBOOT_DIR:
4266 img_optype = OPTYPE_REDBOOT_DIR;
4267 break;
4268 case IMAGE_REDBOOT_CONFIG:
4269 img_optype = OPTYPE_REDBOOT_CONFIG;
4270 break;
4271 case IMAGE_UFI_DIR:
4272 img_optype = OPTYPE_UFI_DIR;
4273 break;
4274 default:
4275 break;
4276 }
4277
4278 return img_optype;
4279}
4280
773a2d7c 4281static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4282 const struct firmware *fw,
4283 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4284{
773a2d7c 4285 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4286 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4287 struct device *dev = &adapter->pdev->dev;
773a2d7c 4288 struct flash_section_info *fsec = NULL;
96c9b2e4 4289 u32 img_offset, img_size, img_type;
70a7b525 4290 u16 img_optype, flash_optype;
96c9b2e4 4291 int status, i, filehdr_size;
96c9b2e4 4292 const u8 *p;
773a2d7c
PR
4293
4294 filehdr_size = sizeof(struct flash_file_hdr_g3);
4295 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4296 if (!fsec) {
96c9b2e4 4297 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4298 return -EINVAL;
773a2d7c
PR
4299 }
4300
70a7b525 4301retry_flash:
773a2d7c
PR
4302 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4303 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4304 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4305 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4306 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4307 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4308
96c9b2e4 4309 if (img_optype == 0xFFFF)
773a2d7c 4310 continue;
70a7b525
VV
4311
4312 if (flash_offset_support)
4313 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4314 else
4315 flash_optype = img_optype;
4316
96c9b2e4
VV
4317 /* Don't bother verifying CRC if an old FW image is being
4318 * flashed
4319 */
4320 if (old_fw_img)
4321 goto flash;
4322
4323 status = be_check_flash_crc(adapter, fw->data, img_offset,
4324 img_size, filehdr_size +
70a7b525 4325 img_hdrs_size, flash_optype,
96c9b2e4 4326 &crc_match);
4c60005f
KA
4327 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4328 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4329 /* The current FW image on the card does not support
4330 * OFFSET based flashing. Retry using older mechanism
4331 * of OPTYPE based flashing
4332 */
4333 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4334 flash_offset_support = false;
4335 goto retry_flash;
4336 }
4337
4338 /* The current FW image on the card does not recognize
4339 * the new FLASH op_type. The FW download is partially
4340 * complete. Reboot the server now to enable FW image
4341 * to recognize the new FLASH op_type. To complete the
4342 * remaining process, download the same FW again after
4343 * the reboot.
4344 */
96c9b2e4
VV
4345 dev_err(dev, "Flash incomplete. Reset the server\n");
4346 dev_err(dev, "Download FW image again after reset\n");
4347 return -EAGAIN;
4348 } else if (status) {
4349 dev_err(dev, "Could not get CRC for 0x%x region\n",
4350 img_optype);
4351 return -EFAULT;
773a2d7c
PR
4352 }
4353
96c9b2e4
VV
4354 if (crc_match)
4355 continue;
773a2d7c 4356
96c9b2e4
VV
4357flash:
4358 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4359 if (p + img_size > fw->data + fw->size)
4360 return -1;
4361
70a7b525
VV
4362 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4363 img_offset);
4364
4365 /* The current FW image on the card does not support OFFSET
4366 * based flashing. Retry using older mechanism of OPTYPE based
4367 * flashing
4368 */
4369 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4370 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4371 flash_offset_support = false;
4372 goto retry_flash;
4373 }
4374
96c9b2e4
VV
4375 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4376 * UFI_DIR region
4377 */
4c60005f
KA
4378 if (old_fw_img &&
4379 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4380 (img_optype == OPTYPE_UFI_DIR &&
4381 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4382 continue;
4383 } else if (status) {
4384 dev_err(dev, "Flashing section type 0x%x failed\n",
4385 img_type);
4386 return -EFAULT;
773a2d7c
PR
4387 }
4388 }
4389 return 0;
3f0d4560
AK
4390}
4391
485bf569 4392static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4393 const struct firmware *fw)
84517482 4394{
485bf569
SN
4395#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4396#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4397 struct device *dev = &adapter->pdev->dev;
84517482 4398 struct be_dma_mem flash_cmd;
485bf569
SN
4399 const u8 *data_ptr = NULL;
4400 u8 *dest_image_ptr = NULL;
4401 size_t image_size = 0;
4402 u32 chunk_size = 0;
4403 u32 data_written = 0;
4404 u32 offset = 0;
4405 int status = 0;
4406 u8 add_status = 0;
f67ef7ba 4407 u8 change_status;
84517482 4408
485bf569 4409 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4410 dev_err(dev, "FW image size should be a multiple of 4\n");
3fb8cb80 4411 return -EINVAL;
d9efd2af
SB
4412 }
4413
485bf569
SN
4414 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4415 + LANCER_FW_DOWNLOAD_CHUNK;
bb864e07 4416 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
d0320f75 4417 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4418 if (!flash_cmd.va)
4419 return -ENOMEM;
84517482 4420
485bf569
SN
4421 dest_image_ptr = flash_cmd.va +
4422 sizeof(struct lancer_cmd_req_write_object);
4423 image_size = fw->size;
4424 data_ptr = fw->data;
4425
4426 while (image_size) {
4427 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4428
4429 /* Copy the image chunk content. */
4430 memcpy(dest_image_ptr, data_ptr, chunk_size);
4431
4432 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4433 chunk_size, offset,
4434 LANCER_FW_DOWNLOAD_LOCATION,
4435 &data_written, &change_status,
4436 &add_status);
485bf569
SN
4437 if (status)
4438 break;
4439
4440 offset += data_written;
4441 data_ptr += data_written;
4442 image_size -= data_written;
4443 }
4444
4445 if (!status) {
4446 /* Commit the FW written */
4447 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4448 0, offset,
4449 LANCER_FW_DOWNLOAD_LOCATION,
4450 &data_written, &change_status,
4451 &add_status);
485bf569
SN
4452 }
4453
bb864e07 4454 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4455 if (status) {
bb864e07 4456 dev_err(dev, "Firmware load error\n");
3fb8cb80 4457 return be_cmd_status(status);
485bf569
SN
4458 }
4459
bb864e07
KA
4460 dev_info(dev, "Firmware flashed successfully\n");
4461
f67ef7ba 4462 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4463 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4464 status = lancer_physdev_ctrl(adapter,
4465 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4466 if (status) {
bb864e07
KA
4467 dev_err(dev, "Adapter busy, could not reset FW\n");
4468 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4469 }
4470 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4471 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4472 }
3fb8cb80
KA
4473
4474 return 0;
485bf569
SN
4475}
4476
5d3acd0d
VV
4477#define BE2_UFI 2
4478#define BE3_UFI 3
4479#define BE3R_UFI 10
4480#define SH_UFI 4
81a9e226 4481#define SH_P2_UFI 11
5d3acd0d 4482
ca34fe38 4483static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 4484 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4485{
5d3acd0d
VV
4486 if (!fhdr) {
4487 dev_err(&adapter->pdev->dev, "Invalid FW UFI file\n");
4488 return -1;
4489 }
773a2d7c 4490
5d3acd0d
VV
4491 /* First letter of the build version is used to identify
4492 * which chip this image file is meant for.
4493 */
4494 switch (fhdr->build[0]) {
4495 case BLD_STR_UFI_TYPE_SH:
81a9e226
VV
4496 return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
4497 SH_UFI;
5d3acd0d
VV
4498 case BLD_STR_UFI_TYPE_BE3:
4499 return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
4500 BE3_UFI;
4501 case BLD_STR_UFI_TYPE_BE2:
4502 return BE2_UFI;
4503 default:
4504 return -1;
4505 }
4506}
773a2d7c 4507
5d3acd0d
VV
4508/* Check if the flash image file is compatible with the adapter that
4509 * is being flashed.
4510 * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
81a9e226 4511 * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
5d3acd0d
VV
4512 */
4513static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4514 struct flash_file_hdr_g3 *fhdr)
4515{
4516 int ufi_type = be_get_ufi_type(adapter, fhdr);
4517
4518 switch (ufi_type) {
81a9e226 4519 case SH_P2_UFI:
5d3acd0d 4520 return skyhawk_chip(adapter);
81a9e226
VV
4521 case SH_UFI:
4522 return (skyhawk_chip(adapter) &&
4523 adapter->asic_rev < ASIC_REV_P2);
5d3acd0d
VV
4524 case BE3R_UFI:
4525 return BE3_chip(adapter);
4526 case BE3_UFI:
4527 return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
4528 case BE2_UFI:
4529 return BE2_chip(adapter);
4530 default:
4531 return false;
4532 }
773a2d7c
PR
4533}
4534
485bf569
SN
4535 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
4536{
5d3acd0d 4537 struct device *dev = &adapter->pdev->dev;
485bf569 4538 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
4539 struct image_hdr *img_hdr_ptr;
4540 int status = 0, i, num_imgs;
485bf569 4541 struct be_dma_mem flash_cmd;
84517482 4542
5d3acd0d
VV
4543 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
4544 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
4545 dev_err(dev, "Flash image is not compatible with adapter\n");
4546 return -EINVAL;
84517482
AK
4547 }
4548
5d3acd0d
VV
4549 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4550 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
4551 GFP_KERNEL);
4552 if (!flash_cmd.va)
4553 return -ENOMEM;
773a2d7c 4554
773a2d7c
PR
4555 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4556 for (i = 0; i < num_imgs; i++) {
4557 img_hdr_ptr = (struct image_hdr *)(fw->data +
4558 (sizeof(struct flash_file_hdr_g3) +
4559 i * sizeof(struct image_hdr)));
5d3acd0d
VV
4560 if (!BE2_chip(adapter) &&
4561 le32_to_cpu(img_hdr_ptr->imageid) != 1)
4562 continue;
84517482 4563
5d3acd0d
VV
4564 if (skyhawk_chip(adapter))
4565 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
4566 num_imgs);
4567 else
4568 status = be_flash_BEx(adapter, fw, &flash_cmd,
4569 num_imgs);
84517482
AK
4570 }
4571
5d3acd0d
VV
4572 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
4573 if (!status)
4574 dev_info(dev, "Firmware flashed successfully\n");
84517482 4575
485bf569
SN
4576 return status;
4577}
4578
4579int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4580{
4581 const struct firmware *fw;
4582 int status;
4583
4584 if (!netif_running(adapter->netdev)) {
4585 dev_err(&adapter->pdev->dev,
4586 "Firmware load not allowed (interface is down)\n");
940a3fcd 4587 return -ENETDOWN;
485bf569
SN
4588 }
4589
4590 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4591 if (status)
4592 goto fw_exit;
4593
4594 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4595
4596 if (lancer_chip(adapter))
4597 status = lancer_fw_download(adapter, fw);
4598 else
4599 status = be_fw_download(adapter, fw);
4600
eeb65ced 4601 if (!status)
e97e3cda 4602 be_cmd_get_fw_ver(adapter);
eeb65ced 4603
84517482
AK
4604fw_exit:
4605 release_firmware(fw);
4606 return status;
4607}
4608
add511b3
RP
4609static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4610 u16 flags)
a77dcb8c
AK
4611{
4612 struct be_adapter *adapter = netdev_priv(dev);
4613 struct nlattr *attr, *br_spec;
4614 int rem;
4615 int status = 0;
4616 u16 mode = 0;
4617
4618 if (!sriov_enabled(adapter))
4619 return -EOPNOTSUPP;
4620
4621 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4622 if (!br_spec)
4623 return -EINVAL;
a77dcb8c
AK
4624
4625 nla_for_each_nested(attr, br_spec, rem) {
4626 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4627 continue;
4628
b7c1a314
TG
4629 if (nla_len(attr) < sizeof(mode))
4630 return -EINVAL;
4631
a77dcb8c
AK
4632 mode = nla_get_u16(attr);
4633 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4634 return -EINVAL;
4635
4636 status = be_cmd_set_hsw_config(adapter, 0, 0,
4637 adapter->if_handle,
4638 mode == BRIDGE_MODE_VEPA ?
4639 PORT_FWD_TYPE_VEPA :
4640 PORT_FWD_TYPE_VEB);
4641 if (status)
4642 goto err;
4643
4644 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4645 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4646
4647 return status;
4648 }
4649err:
4650 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4651 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4652
4653 return status;
4654}
4655
4656static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
748b539a 4657 struct net_device *dev, u32 filter_mask)
a77dcb8c
AK
4658{
4659 struct be_adapter *adapter = netdev_priv(dev);
4660 int status = 0;
4661 u8 hsw_mode;
4662
4663 if (!sriov_enabled(adapter))
4664 return 0;
4665
4666 /* BE and Lancer chips support VEB mode only */
4667 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4668 hsw_mode = PORT_FWD_TYPE_VEB;
4669 } else {
4670 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4671 adapter->if_handle, &hsw_mode);
4672 if (status)
4673 return 0;
4674 }
4675
4676 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4677 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c
SF
4678 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4679 0, 0);
a77dcb8c
AK
4680}
4681
c5abe7c0 4682#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4683/* VxLAN offload Notes:
4684 *
4685 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4686 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4687 * is expected to work across all types of IP tunnels once exported. Skyhawk
4688 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4689 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4690 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4691 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4692 *
4693 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4694 * adds more than one port, disable offloads and don't re-enable them again
4695 * until after all the tunnels are removed.
4696 */
c9c47142
SP
4697static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4698 __be16 port)
4699{
4700 struct be_adapter *adapter = netdev_priv(netdev);
4701 struct device *dev = &adapter->pdev->dev;
4702 int status;
4703
4704 if (lancer_chip(adapter) || BEx_chip(adapter))
4705 return;
4706
4707 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4708 dev_info(dev,
4709 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4710 dev_info(dev, "Disabling VxLAN offloads\n");
4711 adapter->vxlan_port_count++;
4712 goto err;
c9c47142
SP
4713 }
4714
630f4b70
SB
4715 if (adapter->vxlan_port_count++ >= 1)
4716 return;
4717
c9c47142
SP
4718 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4719 OP_CONVERT_NORMAL_TO_TUNNEL);
4720 if (status) {
4721 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4722 goto err;
4723 }
4724
4725 status = be_cmd_set_vxlan_port(adapter, port);
4726 if (status) {
4727 dev_warn(dev, "Failed to add VxLAN port\n");
4728 goto err;
4729 }
4730 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4731 adapter->vxlan_port = port;
4732
630f4b70
SB
4733 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4734 NETIF_F_TSO | NETIF_F_TSO6 |
4735 NETIF_F_GSO_UDP_TUNNEL;
4736 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4737 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4738
c9c47142
SP
4739 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4740 be16_to_cpu(port));
4741 return;
4742err:
4743 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4744}
4745
4746static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4747 __be16 port)
4748{
4749 struct be_adapter *adapter = netdev_priv(netdev);
4750
4751 if (lancer_chip(adapter) || BEx_chip(adapter))
4752 return;
4753
4754 if (adapter->vxlan_port != port)
630f4b70 4755 goto done;
c9c47142
SP
4756
4757 be_disable_vxlan_offloads(adapter);
4758
4759 dev_info(&adapter->pdev->dev,
4760 "Disabled VxLAN offloads for UDP port %d\n",
4761 be16_to_cpu(port));
630f4b70
SB
4762done:
4763 adapter->vxlan_port_count--;
c9c47142 4764}
725d548f 4765
5f35227e
JG
4766static netdev_features_t be_features_check(struct sk_buff *skb,
4767 struct net_device *dev,
4768 netdev_features_t features)
725d548f 4769{
16dde0d6
SB
4770 struct be_adapter *adapter = netdev_priv(dev);
4771 u8 l4_hdr = 0;
4772
4773 /* The code below restricts offload features for some tunneled packets.
4774 * Offload features for normal (non tunnel) packets are unchanged.
4775 */
4776 if (!skb->encapsulation ||
4777 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4778 return features;
4779
4780 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4781 * should disable tunnel offload features if it's not a VxLAN packet,
4782 * as tunnel offloads have been enabled only for VxLAN. This is done to
4783 * allow other tunneled traffic like GRE work fine while VxLAN
4784 * offloads are configured in Skyhawk-R.
4785 */
4786 switch (vlan_get_protocol(skb)) {
4787 case htons(ETH_P_IP):
4788 l4_hdr = ip_hdr(skb)->protocol;
4789 break;
4790 case htons(ETH_P_IPV6):
4791 l4_hdr = ipv6_hdr(skb)->nexthdr;
4792 break;
4793 default:
4794 return features;
4795 }
4796
4797 if (l4_hdr != IPPROTO_UDP ||
4798 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4799 skb->inner_protocol != htons(ETH_P_TEB) ||
4800 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4801 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
4802 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
4803
4804 return features;
725d548f 4805}
c5abe7c0 4806#endif
c9c47142 4807
e5686ad8 4808static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4809 .ndo_open = be_open,
4810 .ndo_stop = be_close,
4811 .ndo_start_xmit = be_xmit,
a54769f5 4812 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4813 .ndo_set_mac_address = be_mac_addr_set,
4814 .ndo_change_mtu = be_change_mtu,
ab1594e9 4815 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4816 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4817 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4818 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4819 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4820 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4821 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4822 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4823 .ndo_set_vf_link_state = be_set_vf_link_state,
66268739
IV
4824#ifdef CONFIG_NET_POLL_CONTROLLER
4825 .ndo_poll_controller = be_netpoll,
4826#endif
a77dcb8c
AK
4827 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4828 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4829#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4830 .ndo_busy_poll = be_busy_poll,
6384a4d0 4831#endif
c5abe7c0 4832#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4833 .ndo_add_vxlan_port = be_add_vxlan_port,
4834 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4835 .ndo_features_check = be_features_check,
c5abe7c0 4836#endif
6b7c5b94
SP
4837};
4838
4839static void be_netdev_init(struct net_device *netdev)
4840{
4841 struct be_adapter *adapter = netdev_priv(netdev);
4842
6332c8d3 4843 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4844 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4845 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
4846 if (be_multi_rxq(adapter))
4847 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4848
4849 netdev->features |= netdev->hw_features |
f646968f 4850 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4851
eb8a50d9 4852 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4853 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4854
fbc13f01
AK
4855 netdev->priv_flags |= IFF_UNICAST_FLT;
4856
6b7c5b94
SP
4857 netdev->flags |= IFF_MULTICAST;
4858
b7e5887e 4859 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4860
10ef9ab4 4861 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4862
7ad24ea4 4863 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4864}
4865
4866static void be_unmap_pci_bars(struct be_adapter *adapter)
4867{
c5b3ad4c
SP
4868 if (adapter->csr)
4869 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 4870 if (adapter->db)
ce66f781 4871 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
4872}
4873
ce66f781
SP
4874static int db_bar(struct be_adapter *adapter)
4875{
4876 if (lancer_chip(adapter) || !be_physfn(adapter))
4877 return 0;
4878 else
4879 return 4;
4880}
4881
4882static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 4883{
dbf0f2a7 4884 if (skyhawk_chip(adapter)) {
ce66f781
SP
4885 adapter->roce_db.size = 4096;
4886 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4887 db_bar(adapter));
4888 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4889 db_bar(adapter));
4890 }
045508a8 4891 return 0;
6b7c5b94
SP
4892}
4893
4894static int be_map_pci_bars(struct be_adapter *adapter)
4895{
4896 u8 __iomem *addr;
fe6d2a38 4897
c5b3ad4c
SP
4898 if (BEx_chip(adapter) && be_physfn(adapter)) {
4899 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
ddf1169f 4900 if (!adapter->csr)
c5b3ad4c
SP
4901 return -ENOMEM;
4902 }
4903
ce66f781 4904 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
ddf1169f 4905 if (!addr)
6b7c5b94 4906 goto pci_map_err;
ba343c77 4907 adapter->db = addr;
ce66f781
SP
4908
4909 be_roce_map_pci_bars(adapter);
6b7c5b94 4910 return 0;
ce66f781 4911
6b7c5b94 4912pci_map_err:
acbafeb1 4913 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
6b7c5b94
SP
4914 be_unmap_pci_bars(adapter);
4915 return -ENOMEM;
4916}
4917
6b7c5b94
SP
4918static void be_ctrl_cleanup(struct be_adapter *adapter)
4919{
8788fdc2 4920 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
4921
4922 be_unmap_pci_bars(adapter);
4923
4924 if (mem->va)
2b7bcebf
IV
4925 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4926 mem->dma);
e7b909a6 4927
5b8821b7 4928 mem = &adapter->rx_filter;
e7b909a6 4929 if (mem->va)
2b7bcebf
IV
4930 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4931 mem->dma);
6b7c5b94
SP
4932}
4933
6b7c5b94
SP
4934static int be_ctrl_init(struct be_adapter *adapter)
4935{
8788fdc2
SP
4936 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4937 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 4938 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 4939 u32 sli_intf;
6b7c5b94 4940 int status;
6b7c5b94 4941
ce66f781
SP
4942 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4943 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4944 SLI_INTF_FAMILY_SHIFT;
4945 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4946
6b7c5b94
SP
4947 status = be_map_pci_bars(adapter);
4948 if (status)
e7b909a6 4949 goto done;
6b7c5b94
SP
4950
4951 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
4952 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4953 mbox_mem_alloc->size,
4954 &mbox_mem_alloc->dma,
4955 GFP_KERNEL);
6b7c5b94 4956 if (!mbox_mem_alloc->va) {
e7b909a6
SP
4957 status = -ENOMEM;
4958 goto unmap_pci_bars;
6b7c5b94
SP
4959 }
4960 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4961 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4962 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4963 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 4964
5b8821b7 4965 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
ede23fa8
JP
4966 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4967 rx_filter->size, &rx_filter->dma,
4968 GFP_KERNEL);
ddf1169f 4969 if (!rx_filter->va) {
e7b909a6
SP
4970 status = -ENOMEM;
4971 goto free_mbox;
4972 }
1f9061d2 4973
2984961c 4974 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
4975 spin_lock_init(&adapter->mcc_lock);
4976 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 4977
5eeff635 4978 init_completion(&adapter->et_cmd_compl);
cf588477 4979 pci_save_state(adapter->pdev);
6b7c5b94 4980 return 0;
e7b909a6
SP
4981
4982free_mbox:
2b7bcebf
IV
4983 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4984 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
4985
4986unmap_pci_bars:
4987 be_unmap_pci_bars(adapter);
4988
4989done:
4990 return status;
6b7c5b94
SP
4991}
4992
4993static void be_stats_cleanup(struct be_adapter *adapter)
4994{
3abcdeda 4995 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
4996
4997 if (cmd->va)
2b7bcebf
IV
4998 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4999 cmd->va, cmd->dma);
6b7c5b94
SP
5000}
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (!cmd->va)
		return -ENOMEM;
	return 0;
}
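
/* Device removal: quiesce RoCE and interrupts, stop the recovery worker,
 * then unwind in roughly the reverse order of be_probe().
 */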
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
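
/* Lancer error recovery: wait for the firmware to become ready again, tear
 * the function down (be_close()/be_clear()), clear the recorded error
 * state, then rebuild it with be_setup()/be_open(). A status of -EAGAIN
 * means resource provisioning is still in progress and the caller should
 * retry later.
 */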
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
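
/* Runs every second off adapter->func_recovery_work. Polls for adapter
 * errors and, on a Lancer HW error, detaches the netdev and attempts a full
 * function recovery; rescheduling stops once recovery fails with anything
 * other than -EAGAIN.
 */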
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
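
/* Invoked from be_worker() when firmware raises an incompatible-SFP event:
 * log the offending module's vendor and part number, then clear the event
 * flag so each event is reported only once.
 */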
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}
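
/* Periodic (1 second) housekeeping: while the interface is down only MCC
 * completions are reaped; otherwise this kicks off a stats query, reads the
 * die temperature every be_get_temp_freq iterations (PF only), replenishes
 * RX queues that starved on memory allocation, adapts EQ interrupt delays
 * and reports unqualified SFP modules.
 */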
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * MCC completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return !pci_num_vf(adapter->pdev);
}
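
/* Printable name of the multi-channel mode the function is in; used in the
 * banner printed at the end of be_probe().
 */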
static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
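
/* PCI probe: enable the device, set the DMA mask, allocate the netdev,
 * bring the controller to a known state (optional FLR, fw_init), set up
 * queues and interrupts via be_setup(), then register the netdev and start
 * the worker and recovery tasks. Each failure point unwinds through the
 * goto ladder at the bottom.
 */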
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
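
/* Legacy PM suspend: arm wake-on-LAN if enabled, stop the recovery worker,
 * close and tear down the interface, then save PCI state and power the
 * device down.
 */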
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
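
/* Legacy PM resume, the mirror image of be_suspend(): power up, restore
 * PCI state, re-sync with firmware (reset + fw_init), rebuild the function
 * with be_setup() and restart the recovery worker before re-attaching the
 * netdev.
 */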
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	status = be_cmd_reset_function(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
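
/* PCI EEH (Extended Error Handling) callbacks. The PCI core calls
 * error_detected() when the slot reports an error, slot_reset() after it
 * has reset the link, and resume() once traffic may restart; the handlers
 * below map those stages onto be_close()/be_clear() for teardown and
 * be_setup()/be_open() for rebuild.
 */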
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
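
/* Module entry point: sanity-check rx_frag_size (only 2048-, 4096- and
 * 8192-byte fragments are supported; anything else falls back to 2048) and
 * register the PCI driver.
 *
 * A hypothetical usage sketch (module name taken from DRV_NAME, assuming
 * the driver is built as a module):
 *
 *   modprobe be2net rx_frag_size=4096
 */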
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
		       " : Module param rx_frag_size must be 2048/4096/8192."
		       " Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);