/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

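/* The doorbell helpers below share one pattern: the queue id and the count
 * being posted are packed into a single 32-bit doorbell word (using the
 * DB_*_MASK/SHIFT macros), wmb() orders the queue-memory updates ahead of
 * the MMIO doorbell write, and the write is skipped entirely once a HW
 * error has been latched via be_check_error().
 */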
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

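/* pmac_id bookkeeping for the helpers below: pmac_id[0] always tracks the
 * currently programmed device MAC, while pmac_id[i + 1] tracks the i-th
 * uc-list entry. Sharing an entry between the two avoids programming the
 * same MAC into the HW filter table twice.
 */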
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

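/* Setting the device MAC is a three-step sequence: program the new MAC (or
 * reuse a matching uc-list entry), delete the old MAC if a new pmac id was
 * issued, then read the active MAC back from the FW to confirm the change
 * actually took effect before committing it to netdev->dev_addr.
 */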
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their
	 * MAC address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

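/* Worked example for accumulate_16bit_val() below: if the accumulator holds
 * 0x0002FFF0 and the HW now reports val = 0x0010, then val < lo(acc), so the
 * 16-bit HW counter must have wrapped; the new accumulated value becomes
 * hi(acc) + val + 65536 = 0x00030010.
 */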
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

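/* Note on the u64_stats loops below: on 32-bit hosts a 64-bit counter cannot
 * be read atomically, so each per-queue sample is retried whenever the
 * seqcount shows that a writer updated the stats mid-read. On 64-bit hosts
 * the begin/retry pair effectively compiles away.
 */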
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

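/* Worked example for the TSO accounting below: a TSO skb carrying 9000 bytes
 * of TCP payload with an MSS of 1448 is cut into gso_segs = 7 segments. The
 * Ethernet/IP/TCP headers (54 bytes for untagged IPv4) go out once per
 * segment, so the 6 extra copies add 6 * 54 = 324 bytes to tx_bytes beyond
 * skb->len.
 */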
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

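/* Example for the WRB count below: an skb with a non-empty linear area and
 * two page fragments needs 1 (header WRB) + 1 (linear) + 2 (frags) = 4 WRBs.
 */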
/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

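/* Reminder on the 802.1Q TCI layout used below: the priority code point
 * lives in the top 3 bits of the 16-bit tag (VLAN_PRIO_SHIFT is 13), so
 * e.g. a TCI of 0x6064 carries priority 3 and VLAN id 0x064. When the OS
 * priority is not in the adapter's allowed bitmap, only those priority bits
 * are rewritten with the FW-recommended value; the VLAN id is preserved.
 */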
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

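/* TX queue occupancy helpers: be_is_txq_full() keeps BE_MAX_TX_FRAG_COUNT
 * entries of headroom so that a maximally fragmented skb still fits once
 * accepted, and be_can_txq_wake() waits for the queue to drain to half
 * before waking the subqueue, giving the stop/wake logic hysteresis.
 */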
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
	      (u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

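/* Each is_*_filt_enabled() macro above tests one bit of
 * adapter->bmc_filt_mask. be_send_pkt_to_bmc() below combines these checks
 * to decide whether an egress multicast/broadcast packet (ARP, DHCP,
 * NetBIOS, IPv6 ND/RA/RAS) should also reach the BMC; such packets are
 * enqueued a second time with the mgmt bit set in the WRB header (see
 * be_xmit()).
 */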
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}

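/* The ndo add/kill vid handlers below only update the VID bitmap and the
 * vlans_added count under rx_filter_lock, then rerun be_vid_config() to
 * reprogram the whole table. Once vlans_added exceeds be_max_vlans() (at
 * most 64, per the comment above), the interface is simply flipped into
 * VLAN promiscuous mode instead of programming more filter entries.
 */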
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (test_bit(vid, adapter->vids))
		goto done;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	mutex_lock(&adapter->rx_filter_lock);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto done;

	if (!test_bit(vid, adapter->vids))
		goto done;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	status = be_vid_config(adapter);
done:
	mutex_unlock(&adapter->rx_filter_lock);
	return status;
}

f66b7cfd
SP
1538static void be_set_all_promisc(struct be_adapter *adapter)
1539{
1540 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1541 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1542}
1543
1544static void be_set_mc_promisc(struct be_adapter *adapter)
6b7c5b94 1545{
0fc16ebf 1546 int status;
6b7c5b94 1547
f66b7cfd
SP
1548 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1549 return;
6b7c5b94 1550
f66b7cfd
SP
1551 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1552 if (!status)
1553 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1554}
1555
92fbb1df 1556static void be_set_uc_promisc(struct be_adapter *adapter)
f66b7cfd
SP
1557{
1558 int status;
1559
92fbb1df
SB
1560 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1561 return;
1562
1563 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
f66b7cfd 1564 if (!status)
92fbb1df
SB
1565 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1566}
1567
1568static void be_clear_uc_promisc(struct be_adapter *adapter)
1569{
1570 int status;
1571
1572 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1573 return;
1574
1575 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1576 if (!status)
1577 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1578}
1579
1580/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We really don't
1582 * add/remove addresses through this callback. But, we use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
1585static int be_uc_list_update(struct net_device *netdev,
1586 const unsigned char *addr)
1587{
1588 struct be_adapter *adapter = netdev_priv(netdev);
1589
1590 adapter->update_uc_list = true;
1591 return 0;
1592}
1593
1594static int be_mc_list_update(struct net_device *netdev,
1595 const unsigned char *addr)
1596{
1597 struct be_adapter *adapter = netdev_priv(netdev);
1598
1599 adapter->update_mc_list = true;
1600 return 0;
1601}
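/* Editor's note -- a minimal sketch of the callback contract assumed above:
 * the sync/unsync hooks of __dev_uc_sync()/__dev_mc_sync() receive the netdev
 * and the address being added/removed and return 0 on success. The driver
 * passes the same no-op "detector" for both roles:
 *
 *	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
 *	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
 *
 * The filters themselves are then reprogrammed wholesale from
 * __be_set_rx_mode() further below.
 */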
1602
1603static void be_set_mc_list(struct be_adapter *adapter)
1604{
1605 struct net_device *netdev = adapter->netdev;
b7172414 1606 struct netdev_hw_addr *ha;
92fbb1df
SB
1607 bool mc_promisc = false;
1608 int status;
1609
b7172414 1610 netif_addr_lock_bh(netdev);
92fbb1df
SB
1611 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1612
1613 if (netdev->flags & IFF_PROMISC) {
1614 adapter->update_mc_list = false;
1615 } else if (netdev->flags & IFF_ALLMULTI ||
1616 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1617 /* Enable multicast promisc if num configured exceeds
1618 * what we support
1619 */
1620 mc_promisc = true;
1621 adapter->update_mc_list = false;
1622 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1623 /* Update mc-list unconditionally if the iface was previously
1624 * in mc-promisc mode and now is out of that mode.
1625 */
1626 adapter->update_mc_list = true;
1627 }
1628
b7172414
SP
1629 if (adapter->update_mc_list) {
1630 int i = 0;
1631
1632 /* cache the mc-list in adapter */
1633 netdev_for_each_mc_addr(ha, netdev) {
1634 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1635 i++;
1636 }
1637 adapter->mc_count = netdev_mc_count(netdev);
1638 }
1639 netif_addr_unlock_bh(netdev);
1640
92fbb1df 1641 if (mc_promisc) {
f66b7cfd 1642 be_set_mc_promisc(adapter);
92fbb1df
SB
1643 } else if (adapter->update_mc_list) {
1644 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1645 if (!status)
1646 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1647 else
1648 be_set_mc_promisc(adapter);
1649
1650 adapter->update_mc_list = false;
1651 }
1652}
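/* Editor's note: the mc-list is snapshotted under netif_addr_lock_bh() but
 * the rx_filter FW command is issued only after the unlock. The addr lock is
 * a BH spinlock while the FW command path can sleep, so (an inference from
 * the pattern above) the command must not be issued under the lock.
 */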
1653
1654static void be_clear_mc_list(struct be_adapter *adapter)
1655{
1656 struct net_device *netdev = adapter->netdev;
1657
1658 __dev_mc_unsync(netdev, NULL);
1659 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
b7172414 1660 adapter->mc_count = 0;
f66b7cfd
SP
1661}
1662
988d44b1
SR
1663static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1664{
1d0f110a 1665 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
988d44b1
SR
1666 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1667 return 0;
1668 }
1669
1d0f110a 1670 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
988d44b1
SR
1671 adapter->if_handle,
1672 &adapter->pmac_id[uc_idx + 1], 0);
1673}
1674
1675static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1676{
1677 if (pmac_id == adapter->pmac_id[0])
1678 return;
1679
1680 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1681}
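/* Editor's sketch of the pmac_id indexing convention used above:
 * pmac_id[0] holds the handle of the primary MAC (dev_mac) and uc-list entry
 * i is tracked in pmac_id[i + 1]. For a uc-list of { A, B }:
 *
 *	pmac_id[0] -> dev_mac
 *	pmac_id[1] -> A (aliases pmac_id[0] if A == dev_mac)
 *	pmac_id[2] -> B
 *
 * be_uc_mac_del() therefore refuses to delete the shared primary handle.
 */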
1682
f66b7cfd
SP
1683static void be_set_uc_list(struct be_adapter *adapter)
1684{
92fbb1df 1685 struct net_device *netdev = adapter->netdev;
f66b7cfd 1686 struct netdev_hw_addr *ha;
92fbb1df 1687 bool uc_promisc = false;
b7172414 1688 int curr_uc_macs = 0, i;
f66b7cfd 1689
b7172414 1690 netif_addr_lock_bh(netdev);
92fbb1df 1691 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
f66b7cfd 1692
92fbb1df
SB
1693 if (netdev->flags & IFF_PROMISC) {
1694 adapter->update_uc_list = false;
1695 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1696 uc_promisc = true;
1697 adapter->update_uc_list = false;
1698 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1699 /* Update uc-list unconditionally if the iface was previously
1700 * in uc-promisc mode and now is out of that mode.
1701 */
1702 adapter->update_uc_list = true;
6b7c5b94
SP
1703 }
1704
b7172414 1705 if (adapter->update_uc_list) {
b7172414 1706 /* cache the uc-list in adapter array */
6052cd1a 1707 i = 0;
b7172414
SP
1708 netdev_for_each_uc_addr(ha, netdev) {
1709 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1710 i++;
1711 }
1712 curr_uc_macs = netdev_uc_count(netdev);
1713 }
1714 netif_addr_unlock_bh(netdev);
1715
92fbb1df
SB
1716 if (uc_promisc) {
1717 be_set_uc_promisc(adapter);
1718 } else if (adapter->update_uc_list) {
1719 be_clear_uc_promisc(adapter);
1720
b7172414 1721 for (i = 0; i < adapter->uc_macs; i++)
988d44b1 1722 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
92fbb1df 1723
b7172414 1724 for (i = 0; i < curr_uc_macs; i++)
988d44b1 1725 be_uc_mac_add(adapter, i);
b7172414 1726 adapter->uc_macs = curr_uc_macs;
92fbb1df 1727 adapter->update_uc_list = false;
f66b7cfd
SP
1728 }
1729}
6b7c5b94 1730
f66b7cfd
SP
1731static void be_clear_uc_list(struct be_adapter *adapter)
1732{
92fbb1df 1733 struct net_device *netdev = adapter->netdev;
f66b7cfd 1734 int i;
fbc13f01 1735
92fbb1df 1736 __dev_uc_unsync(netdev, NULL);
b7172414 1737 for (i = 0; i < adapter->uc_macs; i++)
988d44b1
SR
1738 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1739
f66b7cfd
SP
1740 adapter->uc_macs = 0;
1741}
fbc13f01 1742
b7172414 1743static void __be_set_rx_mode(struct be_adapter *adapter)
f66b7cfd 1744{
b7172414
SP
1745 struct net_device *netdev = adapter->netdev;
1746
1747 mutex_lock(&adapter->rx_filter_lock);
fbc13f01 1748
f66b7cfd 1749 if (netdev->flags & IFF_PROMISC) {
92fbb1df
SB
1750 if (!be_in_all_promisc(adapter))
1751 be_set_all_promisc(adapter);
1752 } else if (be_in_all_promisc(adapter)) {
1753 /* We need to re-program the vlan-list or clear
1754 * vlan-promisc mode (if needed) when the interface
1755 * comes out of promisc mode.
1756 */
1757 be_vid_config(adapter);
f66b7cfd 1758 }
a0794885 1759
92fbb1df 1760 be_set_uc_list(adapter);
f66b7cfd 1761 be_set_mc_list(adapter);
b7172414
SP
1762
1763 mutex_unlock(&adapter->rx_filter_lock);
1764}
1765
1766static void be_work_set_rx_mode(struct work_struct *work)
1767{
1768 struct be_cmd_work *cmd_work =
1769 container_of(work, struct be_cmd_work, work);
1770
1771 __be_set_rx_mode(cmd_work->adapter);
1772 kfree(cmd_work);
6b7c5b94
SP
1773}
1774
ba343c77
SB
1775static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1776{
1777 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1778 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1779 int status;
1780
11ac75ed 1781 if (!sriov_enabled(adapter))
ba343c77
SB
1782 return -EPERM;
1783
11ac75ed 1784 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1785 return -EINVAL;
1786
3c31aaf3
VV
1787 /* Proceed further only if user provided MAC is different
1788 * from active MAC
1789 */
1790 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1791 return 0;
1792
3175d8c2
SP
1793 if (BEx_chip(adapter)) {
1794 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1795 vf + 1);
ba343c77 1796
11ac75ed
SP
1797 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1798 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1799 } else {
1800 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1801 vf + 1);
590c391d
PR
1802 }
1803
abccf23e
KA
1804 if (status) {
1805 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1806 mac, vf, status);
1807 return be_cmd_status(status);
1808 }
64600ea5 1809
abccf23e
KA
1810 ether_addr_copy(vf_cfg->mac_addr, mac);
1811
1812 return 0;
ba343c77
SB
1813}
1814
64600ea5 1815static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1816 struct ifla_vf_info *vi)
64600ea5
AK
1817{
1818 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1819 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1820
11ac75ed 1821 if (!sriov_enabled(adapter))
64600ea5
AK
1822 return -EPERM;
1823
11ac75ed 1824 if (vf >= adapter->num_vfs)
64600ea5
AK
1825 return -EINVAL;
1826
1827 vi->vf = vf;
ed616689
SC
1828 vi->max_tx_rate = vf_cfg->tx_rate;
1829 vi->min_tx_rate = 0;
a60b3a13
AK
1830 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1831 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1832 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1833 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1834 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1835
1836 return 0;
1837}
1838
435452aa
VV
1839static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1840{
1841 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1842 u16 vids[BE_NUM_VLANS_SUPPORTED];
1843 int vf_if_id = vf_cfg->if_handle;
1844 int status;
1845
1846 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1847 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1848 if (status)
1849 return status;
1850
1851 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1852 vids[0] = 0;
1853 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1854 if (!status)
1855 dev_info(&adapter->pdev->dev,
1856 "Cleared guest VLANs on VF%d", vf);
1857
1858 /* After TVT is enabled, disallow VFs to program VLAN filters */
1859 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1860 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1861 ~BE_PRIV_FILTMGMT, vf + 1);
1862 if (!status)
1863 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1864 }
1865 return 0;
1866}
1867
1868static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1869{
1870 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1871 struct device *dev = &adapter->pdev->dev;
1872 int status;
1873
1874 /* Reset Transparent VLAN Tagging. */
1875 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1876 vf_cfg->if_handle, 0, 0);
435452aa
VV
1877 if (status)
1878 return status;
1879
1880 /* Allow VFs to program VLAN filtering */
1881 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1882 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1883 BE_PRIV_FILTMGMT, vf + 1);
1884 if (!status) {
1885 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1886 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1887 }
1888 }
1889
1890 dev_info(dev,
1891 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1892 return 0;
1893}
1894
79aab093
MS
1895static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1896 __be16 vlan_proto)
1da87b7f
AK
1897{
1898 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1899 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1900 int status;
1da87b7f 1901
11ac75ed 1902 if (!sriov_enabled(adapter))
1da87b7f
AK
1903 return -EPERM;
1904
b9fc0e53 1905 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1906 return -EINVAL;
1907
79aab093
MS
1908 if (vlan_proto != htons(ETH_P_8021Q))
1909 return -EPROTONOSUPPORT;
1910
b9fc0e53
AK
1911 if (vlan || qos) {
1912 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1913 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1914 } else {
435452aa 1915 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1916 }
1917
abccf23e
KA
1918 if (status) {
1919 dev_err(&adapter->pdev->dev,
435452aa
VV
1920 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1921 status);
abccf23e
KA
1922 return be_cmd_status(status);
1923 }
1924
1925 vf_cfg->vlan_tag = vlan;
abccf23e 1926 return 0;
1da87b7f
AK
1927}
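/* Editor's worked example for the tag packing above (VLAN_PRIO_SHIFT is 13,
 * and qos is limited to 3 bits by the "qos > 7" check): for vlan = 100 and
 * qos = 5,
 *
 *	vlan |= qos << VLAN_PRIO_SHIFT;	// 100 | (5 << 13)
 *					// = 0x0064 | 0xa000 = 0xa064
 *
 * be_get_vf_config() unpacks this with (tag & VLAN_VID_MASK) for the VID and
 * (tag >> VLAN_PRIO_SHIFT) for the priority.
 */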
1928
ed616689
SC
1929static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1930 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1931{
1932 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1933 struct device *dev = &adapter->pdev->dev;
1934 int percent_rate, status = 0;
1935 u16 link_speed = 0;
1936 u8 link_status;
e1d18735 1937
11ac75ed 1938 if (!sriov_enabled(adapter))
e1d18735
AK
1939 return -EPERM;
1940
94f434c2 1941 if (vf >= adapter->num_vfs)
e1d18735
AK
1942 return -EINVAL;
1943
ed616689
SC
1944 if (min_tx_rate)
1945 return -EINVAL;
1946
0f77ba73
RN
1947 if (!max_tx_rate)
1948 goto config_qos;
1949
1950 status = be_cmd_link_status_query(adapter, &link_speed,
1951 &link_status, 0);
1952 if (status)
1953 goto err;
1954
1955 if (!link_status) {
1956 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1957 status = -ENETDOWN;
0f77ba73
RN
1958 goto err;
1959 }
1960
1961 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1962 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1963 link_speed);
1964 status = -EINVAL;
1965 goto err;
1966 }
1967
1968 /* On Skyhawk the QOS setting must be done only as a % value */
1969 percent_rate = link_speed / 100;
1970 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1971 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1972 percent_rate);
1973 status = -EINVAL;
1974 goto err;
94f434c2 1975 }
e1d18735 1976
0f77ba73
RN
1977config_qos:
1978 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1979 if (status)
0f77ba73
RN
1980 goto err;
1981
1982 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1983 return 0;
1984
1985err:
1986 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1987 max_tx_rate, vf);
abccf23e 1988 return be_cmd_status(status);
e1d18735 1989}
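/* Editor's worked example for the rate checks above: with link_speed = 10000
 * (10Gbps), percent_rate = 10000 / 100 = 100, so on Skyhawk max_tx_rate must
 * be a multiple of 100 Mbps within 100..10000; e.g. 2500 passes while 2550
 * fails the (max_tx_rate % percent_rate) test.
 */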
e2fb1afa 1990
bdce2ad7
SR
1991static int be_set_vf_link_state(struct net_device *netdev, int vf,
1992 int link_state)
1993{
1994 struct be_adapter *adapter = netdev_priv(netdev);
1995 int status;
1996
1997 if (!sriov_enabled(adapter))
1998 return -EPERM;
1999
2000 if (vf >= adapter->num_vfs)
2001 return -EINVAL;
2002
2003 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
2004 if (status) {
2005 dev_err(&adapter->pdev->dev,
2006 "Link state change on VF %d failed: %#x\n", vf, status);
2007 return be_cmd_status(status);
2008 }
bdce2ad7 2009
abccf23e
KA
2010 adapter->vf_cfg[vf].plink_tracking = link_state;
2011
2012 return 0;
bdce2ad7 2013}
e1d18735 2014
e7bcbd7b
KA
2015static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2016{
2017 struct be_adapter *adapter = netdev_priv(netdev);
2018 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2019 u8 spoofchk;
2020 int status;
2021
2022 if (!sriov_enabled(adapter))
2023 return -EPERM;
2024
2025 if (vf >= adapter->num_vfs)
2026 return -EINVAL;
2027
2028 if (BEx_chip(adapter))
2029 return -EOPNOTSUPP;
2030
2031 if (enable == vf_cfg->spoofchk)
2032 return 0;
2033
2034 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2035
2036 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2037 0, spoofchk);
2038 if (status) {
2039 dev_err(&adapter->pdev->dev,
2040 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2041 return be_cmd_status(status);
2042 }
2043
2044 vf_cfg->spoofchk = enable;
2045 return 0;
2046}
2047
2632bafd
SP
2048static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2049 ulong now)
6b7c5b94 2050{
2632bafd
SP
2051 aic->rx_pkts_prev = rx_pkts;
2052 aic->tx_reqs_prev = tx_pkts;
2053 aic->jiffies = now;
2054}
ac124ff9 2055
20947770 2056static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 2057{
20947770
PR
2058 struct be_adapter *adapter = eqo->adapter;
2059 int eqd, start;
2632bafd 2060 struct be_aic_obj *aic;
2632bafd
SP
2061 struct be_rx_obj *rxo;
2062 struct be_tx_obj *txo;
20947770 2063 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
2064 ulong now;
2065 u32 pps, delta;
20947770 2066 int i;
10ef9ab4 2067
20947770
PR
2068 aic = &adapter->aic_obj[eqo->idx];
2069 if (!aic->enable) {
2070 if (aic->jiffies)
2071 aic->jiffies = 0;
2072 eqd = aic->et_eqd;
2073 return eqd;
2074 }
6b7c5b94 2075
20947770 2076 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 2077 do {
57a7744e 2078 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 2079 rx_pkts += rxo->stats.rx_pkts;
57a7744e 2080 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 2081 }
10ef9ab4 2082
20947770 2083 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 2084 do {
57a7744e 2085 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 2086 tx_pkts += txo->stats.tx_reqs;
57a7744e 2087 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 2088 }
6b7c5b94 2089
20947770
PR
2090 /* Skip if we wrapped around or this is the first calculation */
2091 now = jiffies;
2092 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2093 rx_pkts < aic->rx_pkts_prev ||
2094 tx_pkts < aic->tx_reqs_prev) {
2095 be_aic_update(aic, rx_pkts, tx_pkts, now);
2096 return aic->prev_eqd;
2097 }
2632bafd 2098
20947770
PR
2099 delta = jiffies_to_msecs(now - aic->jiffies);
2100 if (delta == 0)
2101 return aic->prev_eqd;
10ef9ab4 2102
20947770
PR
2103 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2104 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2105 eqd = (pps / 15000) << 2;
2632bafd 2106
20947770
PR
2107 if (eqd < 8)
2108 eqd = 0;
2109 eqd = min_t(u32, eqd, aic->max_eqd);
2110 eqd = max_t(u32, eqd, aic->min_eqd);
2111
2112 be_aic_update(aic, rx_pkts, tx_pkts, now);
2113
2114 return eqd;
2115}
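/* Editor's sketch: the coalescing heuristic above, lifted into a
 * self-contained helper purely for illustration (the names and clamp bounds
 * mirror the aic fields; this is not driver code). E.g. a combined RX+TX
 * rate of 120000 pkts/s yields (120000 / 15000) << 2 = 32.
 */
static inline u32 eqd_from_pps(u32 pps, u32 min_eqd, u32 max_eqd)
{
	u32 eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;	/* too slow to be worth a coalescing delay */
	return clamp_t(u32, eqd, min_eqd, max_eqd);
}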
2116
2117/* For Skyhawk-R only */
2118static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2119{
2120 struct be_adapter *adapter = eqo->adapter;
2121 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2122 ulong now = jiffies;
2123 int eqd;
2124 u32 mult_enc;
2125
2126 if (!aic->enable)
2127 return 0;
2128
3c0d49aa 2129 if (jiffies_to_msecs(now - aic->jiffies) < 1)
20947770
PR
2130 eqd = aic->prev_eqd;
2131 else
2132 eqd = be_get_new_eqd(eqo);
2133
2134 if (eqd > 100)
2135 mult_enc = R2I_DLY_ENC_1;
2136 else if (eqd > 60)
2137 mult_enc = R2I_DLY_ENC_2;
2138 else if (eqd > 20)
2139 mult_enc = R2I_DLY_ENC_3;
2140 else
2141 mult_enc = R2I_DLY_ENC_0;
2142
2143 aic->prev_eqd = eqd;
2144
2145 return mult_enc;
2146}
2147
2148void be_eqd_update(struct be_adapter *adapter, bool force_update)
2149{
2150 struct be_set_eqd set_eqd[MAX_EVT_QS];
2151 struct be_aic_obj *aic;
2152 struct be_eq_obj *eqo;
2153 int i, num = 0, eqd;
2154
2155 for_all_evt_queues(adapter, eqo, i) {
2156 aic = &adapter->aic_obj[eqo->idx];
2157 eqd = be_get_new_eqd(eqo);
2158 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
2159 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2160 set_eqd[num].eq_id = eqo->q.id;
2161 aic->prev_eqd = eqd;
2162 num++;
2163 }
ac124ff9 2164 }
2632bafd
SP
2165
2166 if (num)
2167 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
2168}
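/* Editor's worked example: an eqd of 32 is programmed via the
 * be_cmd_modify_eqd() request as delay_multiplier = (32 * 65) / 100 = 20.
 */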
2169
3abcdeda 2170static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 2171 struct be_rx_compl_info *rxcp)
4097f663 2172{
ac124ff9 2173 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 2174
ab1594e9 2175 u64_stats_update_begin(&stats->sync);
3abcdeda 2176 stats->rx_compl++;
2e588f84 2177 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 2178 stats->rx_pkts++;
8670f2a5
SB
2179 if (rxcp->tunneled)
2180 stats->rx_vxlan_offload_pkts++;
2e588f84 2181 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 2182 stats->rx_mcast_pkts++;
2e588f84 2183 if (rxcp->err)
ac124ff9 2184 stats->rx_compl_err++;
ab1594e9 2185 u64_stats_update_end(&stats->sync);
4097f663
SP
2186}
2187
2e588f84 2188static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 2189{
19fad86f 2190 /* L4 checksum is not reliable for non-TCP/UDP packets.
c9c47142
SP
2191 * Also ignore ipcksm for ipv6 pkts
2192 */
2e588f84 2193 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 2194 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
2195}
2196
0b0ef1d0 2197static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 2198{
10ef9ab4 2199 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2200 struct be_rx_page_info *rx_page_info;
3abcdeda 2201 struct be_queue_info *rxq = &rxo->q;
b0fd2eb2 2202 u32 frag_idx = rxq->tail;
6b7c5b94 2203
3abcdeda 2204 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
2205 BUG_ON(!rx_page_info->page);
2206
e50287be 2207 if (rx_page_info->last_frag) {
2b7bcebf
IV
2208 dma_unmap_page(&adapter->pdev->dev,
2209 dma_unmap_addr(rx_page_info, bus),
2210 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2211 rx_page_info->last_frag = false;
2212 } else {
2213 dma_sync_single_for_cpu(&adapter->pdev->dev,
2214 dma_unmap_addr(rx_page_info, bus),
2215 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2216 }
6b7c5b94 2217
0b0ef1d0 2218 queue_tail_inc(rxq);
6b7c5b94
SP
2219 atomic_dec(&rxq->used);
2220 return rx_page_info;
2221}
2222
2223/* Throw away the data in the Rx completion */
10ef9ab4
SP
2224static void be_rx_compl_discard(struct be_rx_obj *rxo,
2225 struct be_rx_compl_info *rxcp)
6b7c5b94 2226{
6b7c5b94 2227 struct be_rx_page_info *page_info;
2e588f84 2228 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2229
e80d9da6 2230 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2231 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2232 put_page(page_info->page);
2233 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2234 }
2235}
2236
2237/*
2238 * skb_fill_rx_data forms a complete skb for an ether frame
2239 * indicated by rxcp.
2240 */
10ef9ab4
SP
2241static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2242 struct be_rx_compl_info *rxcp)
6b7c5b94 2243{
6b7c5b94 2244 struct be_rx_page_info *page_info;
2e588f84
SP
2245 u16 i, j;
2246 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2247 u8 *start;
6b7c5b94 2248
0b0ef1d0 2249 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2250 start = page_address(page_info->page) + page_info->page_offset;
2251 prefetch(start);
2252
2253 /* Copy data in the first descriptor of this completion */
2e588f84 2254 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2255
6b7c5b94
SP
2256 skb->len = curr_frag_len;
2257 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2258 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2259 /* Complete packet has now been moved to data */
2260 put_page(page_info->page);
2261 skb->data_len = 0;
2262 skb->tail += curr_frag_len;
2263 } else {
ac1ae5f3
ED
2264 hdr_len = ETH_HLEN;
2265 memcpy(skb->data, start, hdr_len);
6b7c5b94 2266 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2267 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2268 skb_shinfo(skb)->frags[0].page_offset =
2269 page_info->page_offset + hdr_len;
748b539a
SP
2270 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2271 curr_frag_len - hdr_len);
6b7c5b94 2272 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2273 skb->truesize += rx_frag_size;
6b7c5b94
SP
2274 skb->tail += hdr_len;
2275 }
205859a2 2276 page_info->page = NULL;
6b7c5b94 2277
2e588f84
SP
2278 if (rxcp->pkt_size <= rx_frag_size) {
2279 BUG_ON(rxcp->num_rcvd != 1);
2280 return;
6b7c5b94
SP
2281 }
2282
2283 /* More frags present for this completion */
2e588f84
SP
2284 remaining = rxcp->pkt_size - curr_frag_len;
2285 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2286 page_info = get_rx_page_info(rxo);
2e588f84 2287 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2288
bd46cb6c
AK
2289 /* Coalesce all frags from the same physical page in one slot */
2290 if (page_info->page_offset == 0) {
2291 /* Fresh page */
2292 j++;
b061b39e 2293 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2294 skb_shinfo(skb)->frags[j].page_offset =
2295 page_info->page_offset;
9e903e08 2296 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2297 skb_shinfo(skb)->nr_frags++;
2298 } else {
2299 put_page(page_info->page);
2300 }
2301
9e903e08 2302 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2303 skb->len += curr_frag_len;
2304 skb->data_len += curr_frag_len;
bdb28a97 2305 skb->truesize += rx_frag_size;
2e588f84 2306 remaining -= curr_frag_len;
205859a2 2307 page_info->page = NULL;
6b7c5b94 2308 }
bd46cb6c 2309 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2310}
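/* Editor's worked example for the assembly above, with the default
 * rx_frag_size of 2048: a 3000-byte frame arrives as two fragments.
 * curr_frag_len = min(3000, 2048) = 2048 takes the non-tiny path, so only
 * the ETH_HLEN (14-byte) header is copied into the linear area, the rest of
 * fragment 0 (2034 bytes) is attached as a page frag, and the remaining
 * 3000 - 2048 = 952 bytes come from the second fragment.
 */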
2311
5be93b9a 2312/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2313static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2314 struct be_rx_compl_info *rxcp)
6b7c5b94 2315{
10ef9ab4 2316 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2317 struct net_device *netdev = adapter->netdev;
6b7c5b94 2318 struct sk_buff *skb;
89420424 2319
bb349bb4 2320 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2321 if (unlikely(!skb)) {
ac124ff9 2322 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2323 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2324 return;
2325 }
2326
10ef9ab4 2327 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2328
6332c8d3 2329 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2330 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2331 else
2332 skb_checksum_none_assert(skb);
6b7c5b94 2333
6332c8d3 2334 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2335 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2336 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2337 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2338
b6c0e89d 2339 skb->csum_level = rxcp->tunneled;
6384a4d0 2340 skb_mark_napi_id(skb, napi);
6b7c5b94 2341
343e43c0 2342 if (rxcp->vlanf)
86a9bad3 2343 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2344
2345 netif_receive_skb(skb);
6b7c5b94
SP
2346}
2347
5be93b9a 2348/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2349static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2350 struct napi_struct *napi,
2351 struct be_rx_compl_info *rxcp)
6b7c5b94 2352{
10ef9ab4 2353 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2354 struct be_rx_page_info *page_info;
5be93b9a 2355 struct sk_buff *skb = NULL;
2e588f84
SP
2356 u16 remaining, curr_frag_len;
2357 u16 i, j;
3968fa1e 2358
10ef9ab4 2359 skb = napi_get_frags(napi);
5be93b9a 2360 if (!skb) {
10ef9ab4 2361 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2362 return;
2363 }
2364
2e588f84
SP
2365 remaining = rxcp->pkt_size;
2366 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2367 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2368
2369 curr_frag_len = min(remaining, rx_frag_size);
2370
bd46cb6c
AK
2371 /* Coalesce all frags from the same physical page in one slot */
2372 if (i == 0 || page_info->page_offset == 0) {
2373 /* First frag or Fresh page */
2374 j++;
b061b39e 2375 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2376 skb_shinfo(skb)->frags[j].page_offset =
2377 page_info->page_offset;
9e903e08 2378 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2379 } else {
2380 put_page(page_info->page);
2381 }
9e903e08 2382 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2383 skb->truesize += rx_frag_size;
bd46cb6c 2384 remaining -= curr_frag_len;
6b7c5b94
SP
2385 memset(page_info, 0, sizeof(*page_info));
2386 }
bd46cb6c 2387 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2388
5be93b9a 2389 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2390 skb->len = rxcp->pkt_size;
2391 skb->data_len = rxcp->pkt_size;
5be93b9a 2392 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2393 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2394 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2395 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2396
b6c0e89d 2397 skb->csum_level = rxcp->tunneled;
5be93b9a 2398
343e43c0 2399 if (rxcp->vlanf)
86a9bad3 2400 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2401
10ef9ab4 2402 napi_gro_frags(napi);
2e588f84
SP
2403}
2404
10ef9ab4
SP
2405static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2406 struct be_rx_compl_info *rxcp)
2e588f84 2407{
c3c18bc1
SP
2408 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2409 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2410 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2411 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2412 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2413 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2414 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2415 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2416 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2417 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2418 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2419 if (rxcp->vlanf) {
c3c18bc1
SP
2420 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2421 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2422 }
c3c18bc1 2423 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2424 rxcp->tunneled =
c3c18bc1 2425 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2426}
2427
10ef9ab4
SP
2428static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2429 struct be_rx_compl_info *rxcp)
2e588f84 2430{
c3c18bc1
SP
2431 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2432 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2433 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2434 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2435 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2436 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2437 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2438 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2439 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2440 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2441 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2442 if (rxcp->vlanf) {
c3c18bc1
SP
2443 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2444 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2445 }
c3c18bc1
SP
2446 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2447 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2448}
2449
2450static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2451{
2452 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2453 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2454 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2455
2e588f84
SP
2456 /* For checking the valid bit, it is OK to use either definition, as the
2457 * valid bit is at the same position in both v0 and v1 Rx compl */
2458 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2459 return NULL;
6b7c5b94 2460
2e588f84
SP
2461 rmb();
2462 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2463
2e588f84 2464 if (adapter->be3_native)
10ef9ab4 2465 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2466 else
10ef9ab4 2467 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2468
e38b1706
SK
2469 if (rxcp->ip_frag)
2470 rxcp->l4_csum = 0;
2471
15d72184 2472 if (rxcp->vlanf) {
f93f160b
VV
2473 /* In QNQ modes, if qnq bit is not set, then the packet was
2474 * tagged only with the transparent outer vlan-tag and must
2475 * not be treated as a vlan packet by host
2476 */
2477 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2478 rxcp->vlanf = 0;
6b7c5b94 2479
15d72184 2480 if (!lancer_chip(adapter))
3c709f8f 2481 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2482
939cf306 2483 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2484 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2485 rxcp->vlanf = 0;
2486 }
2e588f84
SP
2487
2488 /* As the compl has been parsed, reset it; we won't touch it again */
2489 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2490
3abcdeda 2491 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2492 return rxcp;
2493}
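/* Editor's note: this getter (and be_tx_compl_get() further below) follows
 * the same single-consumer protocol -- peek at the valid dword, rmb() so the
 * remaining dwords are not read ahead of the valid bit, parse, clear the
 * valid bit so the entry is never consumed twice, then advance the CQ tail.
 */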
2494
1829b086 2495static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2496{
6b7c5b94 2497 u32 order = get_order(size);
1829b086 2498
6b7c5b94 2499 if (order > 0)
1829b086
ED
2500 gfp |= __GFP_COMP;
2501 return alloc_pages(gfp, order);
6b7c5b94
SP
2502}
2503
2504/*
2505 * Allocate a page, split it to fragments of size rx_frag_size and post as
2506 * receive buffers to BE
2507 */
c30d7266 2508static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2509{
3abcdeda 2510 struct be_adapter *adapter = rxo->adapter;
26d92f92 2511 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2512 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2513 struct page *pagep = NULL;
ba42fad0 2514 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2515 struct be_eth_rx_d *rxd;
2516 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2517 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2518
3abcdeda 2519 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2520 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2521 if (!pagep) {
1829b086 2522 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2523 if (unlikely(!pagep)) {
ac124ff9 2524 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2525 break;
2526 }
ba42fad0
IV
2527 page_dmaaddr = dma_map_page(dev, pagep, 0,
2528 adapter->big_page_size,
2b7bcebf 2529 DMA_FROM_DEVICE);
ba42fad0
IV
2530 if (dma_mapping_error(dev, page_dmaaddr)) {
2531 put_page(pagep);
2532 pagep = NULL;
d3de1540 2533 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2534 break;
2535 }
e50287be 2536 page_offset = 0;
6b7c5b94
SP
2537 } else {
2538 get_page(pagep);
e50287be 2539 page_offset += rx_frag_size;
6b7c5b94 2540 }
e50287be 2541 page_info->page_offset = page_offset;
6b7c5b94 2542 page_info->page = pagep;
6b7c5b94
SP
2543
2544 rxd = queue_head_node(rxq);
e50287be 2545 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2546 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2547 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2548
2549 /* Any space left in the current big page for another frag? */
2550 if ((page_offset + rx_frag_size + rx_frag_size) >
2551 adapter->big_page_size) {
2552 pagep = NULL;
e50287be
SP
2553 page_info->last_frag = true;
2554 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2555 } else {
2556 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2557 }
26d92f92
SP
2558
2559 prev_page_info = page_info;
2560 queue_head_inc(rxq);
10ef9ab4 2561 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2562 }
e50287be
SP
2563
2564 /* Mark the last frag of a page when we break out of the above loop
2565 * with no more slots available in the RXQ
2566 */
2567 if (pagep) {
2568 prev_page_info->last_frag = true;
2569 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2570 }
6b7c5b94
SP
2571
2572 if (posted) {
6b7c5b94 2573 atomic_add(posted, &rxq->used);
6384a4d0
SP
2574 if (rxo->rx_post_starved)
2575 rxo->rx_post_starved = false;
c30d7266 2576 do {
69304cc9 2577 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2578 be_rxq_notify(adapter, rxq->id, notify);
2579 posted -= notify;
2580 } while (posted);
ea1dae11
SP
2581 } else if (atomic_read(&rxq->used) == 0) {
2582 /* Let be_worker replenish when memory is available */
3abcdeda 2583 rxo->rx_post_starved = true;
6b7c5b94 2584 }
6b7c5b94
SP
2585}
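/* Editor's worked example: with the default rx_frag_size of 2048 on a
 * 4K-page system, get_order(2048) == 0 so big_page_size == 4096 (computed in
 * be_rx_cqs_create()), and each page is split into 4096 / 2048 == 2 receive
 * fragments; the "space left in the current big page" test above moves on to
 * a fresh page after the second fragment.
 */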
2586
152ffe5b 2587static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2588{
152ffe5b
SB
2589 struct be_queue_info *tx_cq = &txo->cq;
2590 struct be_tx_compl_info *txcp = &txo->txcp;
2591 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2592
152ffe5b 2593 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2594 return NULL;
2595
152ffe5b 2596 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2597 rmb();
152ffe5b 2598 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2599
152ffe5b
SB
2600 txcp->status = GET_TX_COMPL_BITS(status, compl);
2601 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2602
152ffe5b 2603 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2604 queue_tail_inc(tx_cq);
2605 return txcp;
2606}
2607
3c8def97 2608static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2609 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2610{
5f07b3c5 2611 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2612 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2613 struct sk_buff *skb = NULL;
2614 bool unmap_skb_hdr = false;
a73b796e 2615 struct be_eth_wrb *wrb;
b0fd2eb2 2616 u16 num_wrbs = 0;
2617 u32 frag_index;
6b7c5b94 2618
ec43b1a6 2619 do {
5f07b3c5
SP
2620 if (sent_skbs[txq->tail]) {
2621 /* Free skb from prev req */
2622 if (skb)
2623 dev_consume_skb_any(skb);
2624 skb = sent_skbs[txq->tail];
2625 sent_skbs[txq->tail] = NULL;
2626 queue_tail_inc(txq); /* skip hdr wrb */
2627 num_wrbs++;
2628 unmap_skb_hdr = true;
2629 }
a73b796e 2630 wrb = queue_tail_node(txq);
5f07b3c5 2631 frag_index = txq->tail;
2b7bcebf 2632 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2633 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2634 unmap_skb_hdr = false;
6b7c5b94 2635 queue_tail_inc(txq);
5f07b3c5
SP
2636 num_wrbs++;
2637 } while (frag_index != last_index);
2638 dev_consume_skb_any(skb);
6b7c5b94 2639
4d586b82 2640 return num_wrbs;
6b7c5b94
SP
2641}
2642
10ef9ab4
SP
2643/* Return the number of events in the event queue */
2644static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2645{
10ef9ab4
SP
2646 struct be_eq_entry *eqe;
2647 int num = 0;
859b1e4e 2648
10ef9ab4
SP
2649 do {
2650 eqe = queue_tail_node(&eqo->q);
2651 if (eqe->evt == 0)
2652 break;
859b1e4e 2653
10ef9ab4
SP
2654 rmb();
2655 eqe->evt = 0;
2656 num++;
2657 queue_tail_inc(&eqo->q);
2658 } while (true);
2659
2660 return num;
859b1e4e
SP
2661}
2662
10ef9ab4
SP
2663/* Leaves the EQ in a disarmed state */
2664static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2665{
10ef9ab4 2666 int num = events_get(eqo);
859b1e4e 2667
20947770 2668 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2669}
2670
99b44304
KA
2671/* Free posted rx buffers that were not used */
2672static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2673{
3abcdeda 2674 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2675 struct be_rx_page_info *page_info;
2676
2677 while (atomic_read(&rxq->used) > 0) {
2678 page_info = get_rx_page_info(rxo);
2679 put_page(page_info->page);
2680 memset(page_info, 0, sizeof(*page_info));
2681 }
2682 BUG_ON(atomic_read(&rxq->used));
2683 rxq->tail = 0;
2684 rxq->head = 0;
2685}
2686
2687static void be_rx_cq_clean(struct be_rx_obj *rxo)
2688{
3abcdeda 2689 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2690 struct be_rx_compl_info *rxcp;
d23e946c
SP
2691 struct be_adapter *adapter = rxo->adapter;
2692 int flush_wait = 0;
6b7c5b94 2693
d23e946c
SP
2694 /* Consume pending rx completions.
2695 * Wait for the flush completion (identified by zero num_rcvd)
2696 * to arrive. Notify CQ even when there are no more CQ entries
2697 * for HW to flush partially coalesced CQ entries.
2698 * In Lancer, there is no need to wait for flush compl.
2699 */
2700 for (;;) {
2701 rxcp = be_rx_compl_get(rxo);
ddf1169f 2702 if (!rxcp) {
d23e946c
SP
2703 if (lancer_chip(adapter))
2704 break;
2705
954f6825
VD
2706 if (flush_wait++ > 50 ||
2707 be_check_error(adapter,
2708 BE_ERROR_HW)) {
d23e946c
SP
2709 dev_warn(&adapter->pdev->dev,
2710 "did not receive flush compl\n");
2711 break;
2712 }
2713 be_cq_notify(adapter, rx_cq->id, true, 0);
2714 mdelay(1);
2715 } else {
2716 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2717 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2718 if (rxcp->num_rcvd == 0)
2719 break;
2720 }
6b7c5b94
SP
2721 }
2722
d23e946c
SP
2723 /* After cleanup, leave the CQ in unarmed state */
2724 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2725}
2726
0ae57bb3 2727static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2728{
5f07b3c5 2729 struct device *dev = &adapter->pdev->dev;
b0fd2eb2 2730 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
152ffe5b 2731 struct be_tx_compl_info *txcp;
0ae57bb3 2732 struct be_queue_info *txq;
b0fd2eb2 2733 u32 end_idx, notified_idx;
152ffe5b 2734 struct be_tx_obj *txo;
0ae57bb3 2735 int i, pending_txqs;
a8e9179a 2736
1a3d0717 2737 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2738 do {
0ae57bb3
SP
2739 pending_txqs = adapter->num_tx_qs;
2740
2741 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2742 cmpl = 0;
2743 num_wrbs = 0;
0ae57bb3 2744 txq = &txo->q;
152ffe5b
SB
2745 while ((txcp = be_tx_compl_get(txo))) {
2746 num_wrbs +=
2747 be_tx_compl_process(adapter, txo,
2748 txcp->end_index);
0ae57bb3
SP
2749 cmpl++;
2750 }
2751 if (cmpl) {
2752 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2753 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2754 timeo = 0;
0ae57bb3 2755 }
cf5671e6 2756 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2757 pending_txqs--;
a8e9179a
SP
2758 }
2759
954f6825
VD
2760 if (pending_txqs == 0 || ++timeo > 10 ||
2761 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2762 break;
2763
2764 mdelay(1);
2765 } while (true);
2766
5f07b3c5 2767 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2768 for_all_tx_queues(adapter, txo, i) {
2769 txq = &txo->q;
0ae57bb3 2770
5f07b3c5
SP
2771 if (atomic_read(&txq->used)) {
2772 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2773 i, atomic_read(&txq->used));
2774 notified_idx = txq->tail;
0ae57bb3 2775 end_idx = txq->tail;
5f07b3c5
SP
2776 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2777 txq->len);
2778 /* Use the tx-compl process logic to handle requests
2779 * that were not sent to the HW.
2780 */
0ae57bb3
SP
2781 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2782 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2783 BUG_ON(atomic_read(&txq->used));
2784 txo->pend_wrb_cnt = 0;
2785 /* Since hw was never notified of these requests,
2786 * reset TXQ indices
2787 */
2788 txq->head = notified_idx;
2789 txq->tail = notified_idx;
0ae57bb3 2790 }
b03388d6 2791 }
6b7c5b94
SP
2792}
2793
10ef9ab4
SP
2794static void be_evt_queues_destroy(struct be_adapter *adapter)
2795{
2796 struct be_eq_obj *eqo;
2797 int i;
2798
2799 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2800 if (eqo->q.created) {
2801 be_eq_clean(eqo);
10ef9ab4 2802 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
68d7bdcb 2803 netif_napi_del(&eqo->napi);
649886a3 2804 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2805 }
10ef9ab4
SP
2806 be_queue_free(adapter, &eqo->q);
2807 }
2808}
2809
2810static int be_evt_queues_create(struct be_adapter *adapter)
2811{
2812 struct be_queue_info *eq;
2813 struct be_eq_obj *eqo;
2632bafd 2814 struct be_aic_obj *aic;
10ef9ab4
SP
2815 int i, rc;
2816
e261768e 2817 /* need enough EQs to service both RX and TX queues */
92bf14ab 2818 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
e261768e
SP
2819 max(adapter->cfg_num_rx_irqs,
2820 adapter->cfg_num_tx_irqs));
10ef9ab4
SP
2821
2822 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2823 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2824
2632bafd 2825 aic = &adapter->aic_obj[i];
10ef9ab4 2826 eqo->adapter = adapter;
10ef9ab4 2827 eqo->idx = i;
2632bafd
SP
2828 aic->max_eqd = BE_MAX_EQD;
2829 aic->enable = true;
10ef9ab4
SP
2830
2831 eq = &eqo->q;
2832 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2833 sizeof(struct be_eq_entry));
10ef9ab4
SP
2834 if (rc)
2835 return rc;
2836
f2f781a7 2837 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2838 if (rc)
2839 return rc;
649886a3
KA
2840
2841 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2842 return -ENOMEM;
2843 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2844 eqo->affinity_mask);
2845 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2846 BE_NAPI_WEIGHT);
10ef9ab4 2847 }
1cfafab9 2848 return 0;
10ef9ab4
SP
2849}
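/* Editor's worked example (illustrative numbers): with num_irqs() == 8,
 * cfg_num_rx_irqs == 6 and cfg_num_tx_irqs == 4, the min/max expression
 * above gives num_evt_qs = min(8, max(6, 4)) = 6 event queues.
 */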
2850
5fb379ee
SP
2851static void be_mcc_queues_destroy(struct be_adapter *adapter)
2852{
2853 struct be_queue_info *q;
5fb379ee 2854
8788fdc2 2855 q = &adapter->mcc_obj.q;
5fb379ee 2856 if (q->created)
8788fdc2 2857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2858 be_queue_free(adapter, q);
2859
8788fdc2 2860 q = &adapter->mcc_obj.cq;
5fb379ee 2861 if (q->created)
8788fdc2 2862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2863 be_queue_free(adapter, q);
2864}
2865
2866/* Must be called only after TX qs are created as MCC shares TX EQ */
2867static int be_mcc_queues_create(struct be_adapter *adapter)
2868{
2869 struct be_queue_info *q, *cq;
5fb379ee 2870
8788fdc2 2871 cq = &adapter->mcc_obj.cq;
5fb379ee 2872 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2873 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2874 goto err;
2875
10ef9ab4
SP
2876 /* Use the default EQ for MCC completions */
2877 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2878 goto mcc_cq_free;
2879
8788fdc2 2880 q = &adapter->mcc_obj.q;
5fb379ee
SP
2881 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2882 goto mcc_cq_destroy;
2883
8788fdc2 2884 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2885 goto mcc_q_free;
2886
2887 return 0;
2888
2889mcc_q_free:
2890 be_queue_free(adapter, q);
2891mcc_cq_destroy:
8788fdc2 2892 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2893mcc_cq_free:
2894 be_queue_free(adapter, cq);
2895err:
2896 return -1;
2897}
2898
6b7c5b94
SP
2899static void be_tx_queues_destroy(struct be_adapter *adapter)
2900{
2901 struct be_queue_info *q;
3c8def97
SP
2902 struct be_tx_obj *txo;
2903 u8 i;
6b7c5b94 2904
3c8def97
SP
2905 for_all_tx_queues(adapter, txo, i) {
2906 q = &txo->q;
2907 if (q->created)
2908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2909 be_queue_free(adapter, q);
6b7c5b94 2910
3c8def97
SP
2911 q = &txo->cq;
2912 if (q->created)
2913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2914 be_queue_free(adapter, q);
2915 }
6b7c5b94
SP
2916}
2917
7707133c 2918static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2919{
73f394e6 2920 struct be_queue_info *cq;
3c8def97 2921 struct be_tx_obj *txo;
73f394e6 2922 struct be_eq_obj *eqo;
92bf14ab 2923 int status, i;
6b7c5b94 2924
e261768e 2925 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
dafc0fe3 2926
10ef9ab4
SP
2927 for_all_tx_queues(adapter, txo, i) {
2928 cq = &txo->cq;
2929 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2930 sizeof(struct be_eth_tx_compl));
2931 if (status)
2932 return status;
3c8def97 2933
827da44c
JS
2934 u64_stats_init(&txo->stats.sync);
2935 u64_stats_init(&txo->stats.sync_compl);
2936
10ef9ab4
SP
2937 /* If num_evt_qs is less than num_tx_qs, then more than
2938 * one txq shares an eq
2939 */
73f394e6
SP
2940 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2941 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2942 if (status)
2943 return status;
6b7c5b94 2944
10ef9ab4
SP
2945 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2946 sizeof(struct be_eth_wrb));
2947 if (status)
2948 return status;
6b7c5b94 2949
94d73aaa 2950 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2951 if (status)
2952 return status;
73f394e6
SP
2953
2954 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2955 eqo->idx);
3c8def97 2956 }
6b7c5b94 2957
d379142b
SP
2958 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2959 adapter->num_tx_qs);
10ef9ab4 2960 return 0;
6b7c5b94
SP
2961}
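/* Editor's worked example for the "i % num_evt_qs" mapping above: with
 * num_tx_qs == 4 and num_evt_qs == 2, txq0/txq2 share eq0 and txq1/txq3
 * share eq1.
 */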
2962
10ef9ab4 2963static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2964{
2965 struct be_queue_info *q;
3abcdeda
SP
2966 struct be_rx_obj *rxo;
2967 int i;
2968
2969 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2970 q = &rxo->cq;
2971 if (q->created)
2972 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2973 be_queue_free(adapter, q);
ac6a0c4a
SP
2974 }
2975}
2976
10ef9ab4 2977static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2978{
10ef9ab4 2979 struct be_queue_info *eq, *cq;
3abcdeda
SP
2980 struct be_rx_obj *rxo;
2981 int rc, i;
6b7c5b94 2982
e261768e
SP
2983 adapter->num_rss_qs =
2984 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
92bf14ab 2985
71bb8bd0 2986 /* We'll use RSS only if at least 2 RSS rings are supported. */
e261768e 2987 if (adapter->num_rss_qs < 2)
71bb8bd0
VV
2988 adapter->num_rss_qs = 0;
2989
2990 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2991
2992 /* When the interface is not capable of RSS rings (and there is no
2993 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2994 */
71bb8bd0
VV
2995 if (adapter->num_rx_qs == 0)
2996 adapter->num_rx_qs = 1;
92bf14ab 2997
6b7c5b94 2998 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2999 for_all_rx_queues(adapter, rxo, i) {
3000 rxo->adapter = adapter;
3abcdeda
SP
3001 cq = &rxo->cq;
3002 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 3003 sizeof(struct be_eth_rx_compl));
3abcdeda 3004 if (rc)
10ef9ab4 3005 return rc;
3abcdeda 3006
827da44c 3007 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
3008 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3009 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 3010 if (rc)
10ef9ab4 3011 return rc;
3abcdeda 3012 }
6b7c5b94 3013
d379142b 3014 dev_info(&adapter->pdev->dev,
71bb8bd0 3015 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 3016 return 0;
b628bde2
SP
3017}
3018
6b7c5b94
SP
3019static irqreturn_t be_intx(int irq, void *dev)
3020{
e49cc34f
SP
3021 struct be_eq_obj *eqo = dev;
3022 struct be_adapter *adapter = eqo->adapter;
3023 int num_evts = 0;
6b7c5b94 3024
d0b9cec3
SP
3025 /* IRQ is not expected when NAPI is scheduled as the EQ
3026 * will not be armed.
3027 * But, this can happen on Lancer INTx where it takes
3028 * a while to de-assert INTx or in BE2 where occasionally
3029 * an interrupt may be raised even when EQ is unarmed.
3030 * If NAPI is already scheduled, then counting & notifying
3031 * events will orphan them.
e49cc34f 3032 */
d0b9cec3 3033 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 3034 num_evts = events_get(eqo);
d0b9cec3
SP
3035 __napi_schedule(&eqo->napi);
3036 if (num_evts)
3037 eqo->spurious_intr = 0;
3038 }
20947770 3039 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 3040
d0b9cec3
SP
3041 /* Return IRQ_HANDLED only for the first spurious intr
3042 * after a valid intr to stop the kernel from branding
3043 * this irq as a bad one!
e49cc34f 3044 */
d0b9cec3
SP
3045 if (num_evts || eqo->spurious_intr++ == 0)
3046 return IRQ_HANDLED;
3047 else
3048 return IRQ_NONE;
6b7c5b94
SP
3049}
3050
10ef9ab4 3051static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 3052{
10ef9ab4 3053 struct be_eq_obj *eqo = dev;
6b7c5b94 3054
20947770 3055 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 3056 napi_schedule(&eqo->napi);
6b7c5b94
SP
3057 return IRQ_HANDLED;
3058}
3059
2e588f84 3060static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 3061{
e38b1706 3062 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
6b7c5b94
SP
3063}
3064
10ef9ab4 3065static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
fb6113e6 3066 int budget)
6b7c5b94 3067{
3abcdeda
SP
3068 struct be_adapter *adapter = rxo->adapter;
3069 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 3070 struct be_rx_compl_info *rxcp;
6b7c5b94 3071 u32 work_done;
c30d7266 3072 u32 frags_consumed = 0;
6b7c5b94
SP
3073
3074 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 3075 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
3076 if (!rxcp)
3077 break;
3078
12004ae9
SP
3079 /* Is it a flush compl that has no data */
3080 if (unlikely(rxcp->num_rcvd == 0))
3081 goto loop_continue;
3082
3083 /* Discard compl with partial DMA Lancer B0 */
3084 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 3085 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
3086 goto loop_continue;
3087 }
3088
3089 /* On BE drop pkts that arrive due to imperfect filtering in
3090 * promiscuous mode on some SKUs
3091 */
3092 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 3093 !lancer_chip(adapter))) {
10ef9ab4 3094 be_rx_compl_discard(rxo, rxcp);
12004ae9 3095 goto loop_continue;
64642811 3096 }
009dd872 3097
fb6113e6 3098 if (do_gro(rxcp))
10ef9ab4 3099 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 3100 else
6384a4d0
SP
3101 be_rx_compl_process(rxo, napi, rxcp);
3102
12004ae9 3103loop_continue:
c30d7266 3104 frags_consumed += rxcp->num_rcvd;
2e588f84 3105 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
3106 }
3107
10ef9ab4
SP
3108 if (work_done) {
3109 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 3110
6384a4d0
SP
3111 /* When an rx-obj gets into post_starved state, just
3112 * let be_worker do the posting.
3113 */
3114 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3115 !rxo->rx_post_starved)
c30d7266
AK
3116 be_post_rx_frags(rxo, GFP_ATOMIC,
3117 max_t(u32, MAX_RX_POST,
3118 frags_consumed));
6b7c5b94 3119 }
10ef9ab4 3120
6b7c5b94
SP
3121 return work_done;
3122}
3123
152ffe5b 3124static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
3125{
3126 switch (status) {
3127 case BE_TX_COMP_HDR_PARSE_ERR:
3128 tx_stats(txo)->tx_hdr_parse_err++;
3129 break;
3130 case BE_TX_COMP_NDMA_ERR:
3131 tx_stats(txo)->tx_dma_err++;
3132 break;
3133 case BE_TX_COMP_ACL_ERR:
3134 tx_stats(txo)->tx_spoof_check_err++;
3135 break;
3136 }
3137}
3138
152ffe5b 3139static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
3140{
3141 switch (status) {
3142 case LANCER_TX_COMP_LSO_ERR:
3143 tx_stats(txo)->tx_tso_err++;
3144 break;
3145 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3146 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3147 tx_stats(txo)->tx_spoof_check_err++;
3148 break;
3149 case LANCER_TX_COMP_QINQ_ERR:
3150 tx_stats(txo)->tx_qinq_err++;
3151 break;
3152 case LANCER_TX_COMP_PARITY_ERR:
3153 tx_stats(txo)->tx_internal_parity_err++;
3154 break;
3155 case LANCER_TX_COMP_DMA_ERR:
3156 tx_stats(txo)->tx_dma_err++;
3157 break;
3158 }
3159}
3160
c8f64615
SP
3161static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3162 int idx)
6b7c5b94 3163{
c8f64615 3164 int num_wrbs = 0, work_done = 0;
152ffe5b 3165 struct be_tx_compl_info *txcp;
c8f64615 3166
152ffe5b
SB
3167 while ((txcp = be_tx_compl_get(txo))) {
3168 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 3169 work_done++;
3c8def97 3170
152ffe5b 3171 if (txcp->status) {
512bb8a2 3172 if (lancer_chip(adapter))
152ffe5b 3173 lancer_update_tx_err(txo, txcp->status);
512bb8a2 3174 else
152ffe5b 3175 be_update_tx_err(txo, txcp->status);
512bb8a2 3176 }
10ef9ab4 3177 }
6b7c5b94 3178
10ef9ab4
SP
3179 if (work_done) {
3180 be_cq_notify(adapter, txo->cq.id, true, work_done);
3181 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 3182
10ef9ab4
SP
3183 /* As Tx wrbs have been freed up, wake up netdev queue
3184 * if it was stopped due to lack of tx wrbs. */
3185 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 3186 be_can_txq_wake(txo)) {
10ef9ab4 3187 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 3188 }
10ef9ab4
SP
3189
3190 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3191 tx_stats(txo)->tx_compl += work_done;
3192 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 3193 }
10ef9ab4 3194}
6b7c5b94 3195
68d7bdcb 3196int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3197{
3198 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3199 struct be_adapter *adapter = eqo->adapter;
0b545a62 3200 int max_work = 0, work, i, num_evts;
6384a4d0 3201 struct be_rx_obj *rxo;
a4906ea0 3202 struct be_tx_obj *txo;
20947770 3203 u32 mult_enc = 0;
f31e50a8 3204
0b545a62
SP
3205 num_evts = events_get(eqo);
3206
a4906ea0
SP
3207 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3208 be_process_tx(adapter, txo, i);
f31e50a8 3209
fb6113e6
ED
3210 /* This loop will iterate twice for EQ0 in which
3211 * completions of the last RXQ (default one) are also processed.
3212 * For other EQs the loop iterates only once
3213 */
3214 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3215 work = be_process_rx(rxo, napi, budget);
3216 max_work = max(work, max_work);
10ef9ab4 3217 }
6b7c5b94 3218
10ef9ab4
SP
3219 if (is_mcc_eqo(eqo))
3220 be_process_mcc(adapter);
93c86700 3221
10ef9ab4 3222 if (max_work < budget) {
6ad20165 3223 napi_complete_done(napi, max_work);
20947770
PR
3224
3225 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3226 * delay via a delay multiplier encoding value
3227 */
3228 if (skyhawk_chip(adapter))
3229 mult_enc = be_get_eq_delay_mult_enc(eqo);
3230
3231 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3232 mult_enc);
10ef9ab4
SP
3233 } else {
3234 /* As we'll continue in polling mode, count and clear events */
20947770 3235 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3236 }
10ef9ab4 3237 return max_work;
6b7c5b94
SP
3238}
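/* Editor's note: the standard NAPI contract as used above -- when RX work
 * stays under the budget, napi_complete_done() ends the poll and the EQ is
 * re-armed (with the Skyhawk delay encoding where applicable); when the
 * budget is exhausted the EQ is left unarmed so the kernel polls again.
 */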
3239
f67ef7ba 3240void be_detect_error(struct be_adapter *adapter)
7c185276 3241{
e1cfb67a
PR
3242 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3243 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3244 u32 i;
eb0eecc1 3245 struct device *dev = &adapter->pdev->dev;
7c185276 3246
954f6825 3247 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3248 return;
3249
e1cfb67a
PR
3250 if (lancer_chip(adapter)) {
3251 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3252 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3253 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3254 sliport_err1 = ioread32(adapter->db +
748b539a 3255 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3256 sliport_err2 = ioread32(adapter->db +
748b539a 3257 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3258 /* Do not log error messages if its a FW reset */
3259 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3260 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3261 dev_info(dev, "Firmware update in progress\n");
3262 } else {
eb0eecc1
SK
3263 dev_err(dev, "Error detected in the card\n");
3264 dev_err(dev, "ERR: sliport status 0x%x\n",
3265 sliport_status);
3266 dev_err(dev, "ERR: sliport error1 0x%x\n",
3267 sliport_err1);
3268 dev_err(dev, "ERR: sliport error2 0x%x\n",
3269 sliport_err2);
3270 }
e1cfb67a
PR
3271 }
3272 } else {
25848c90
SR
3273 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3274 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3275 ue_lo_mask = ioread32(adapter->pcicfg +
3276 PCICFG_UE_STATUS_LOW_MASK);
3277 ue_hi_mask = ioread32(adapter->pcicfg +
3278 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3279
f67ef7ba
PR
3280 ue_lo = (ue_lo & ~ue_lo_mask);
3281 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3282
eb0eecc1
SK
 3283 /* On certain platforms BE hardware can indicate spurious UEs.
 3284 * In case of a real UE, the HW stops working on its own anyway.
 3285 * Hence the hw_error flag is not set on UE detection.
 3286 */
f67ef7ba 3287
eb0eecc1 3288 if (ue_lo || ue_hi) {
710f3e59 3289 dev_err(dev, "Error detected in the adapter");
eb0eecc1 3290 if (skyhawk_chip(adapter))
954f6825
VD
3291 be_set_error(adapter, BE_ERROR_UE);
3292
eb0eecc1
SK
3293 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3294 if (ue_lo & 1)
3295 dev_err(dev, "UE: %s bit set\n",
3296 ue_status_low_desc[i]);
3297 }
3298 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3299 if (ue_hi & 1)
3300 dev_err(dev, "UE: %s bit set\n",
3301 ue_status_hi_desc[i]);
3302 }
7c185276
AK
3303 }
3304 }
7c185276
AK
3305}
3306
8d56ff11
SP
3307static void be_msix_disable(struct be_adapter *adapter)
3308{
ac6a0c4a 3309 if (msix_enabled(adapter)) {
8d56ff11 3310 pci_disable_msix(adapter->pdev);
ac6a0c4a 3311 adapter->num_msix_vec = 0;
68d7bdcb 3312 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3313 }
3314}
3315
c2bba3df 3316static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3317{
6fde0e63 3318 unsigned int i, max_roce_eqs;
d379142b 3319 struct device *dev = &adapter->pdev->dev;
6fde0e63 3320 int num_vec;
6b7c5b94 3321
ce7faf0a
SP
3322 /* If RoCE is supported, program the max number of vectors that
 3323 * could be used for NIC and RoCE; otherwise, just program the number
3324 * we'll use initially.
92bf14ab 3325 */
e261768e
SP
3326 if (be_roce_supported(adapter)) {
3327 max_roce_eqs =
3328 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3329 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3330 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3331 } else {
3332 num_vec = max(adapter->cfg_num_rx_irqs,
3333 adapter->cfg_num_tx_irqs);
3334 }
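/* Illustrative sketch (hypothetical numbers, not from the source): with
 * 16 function EQs, 8 NIC EQs and 12 online CPUs, max_roce_eqs becomes
 * min(16 - 8, 12) = 8, so be_max_any_irqs() + 8 vectors are requested.
 * pci_enable_msix_range() below may grant fewer, down to
 * MIN_MSIX_VECTORS; a grant above that is then split between NIC and
 * RoCE.
 */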
3abcdeda 3335
ac6a0c4a 3336 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3337 adapter->msix_entries[i].entry = i;
3338
7dc4c064
AG
3339 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3340 MIN_MSIX_VECTORS, num_vec);
3341 if (num_vec < 0)
3342 goto fail;
92bf14ab 3343
92bf14ab
SP
3344 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3345 adapter->num_msix_roce_vec = num_vec / 2;
3346 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3347 adapter->num_msix_roce_vec);
3348 }
3349
3350 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3351
3352 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3353 adapter->num_msix_vec);
c2bba3df 3354 return 0;
7dc4c064
AG
3355
3356fail:
3357 dev_warn(dev, "MSIx enable failed\n");
3358
3359 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3360 if (be_virtfn(adapter))
7dc4c064
AG
3361 return num_vec;
3362 return 0;
6b7c5b94
SP
3363}
3364
fe6d2a38 3365static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3366 struct be_eq_obj *eqo)
b628bde2 3367{
f2f781a7 3368 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3369}
6b7c5b94 3370
b628bde2
SP
3371static int be_msix_register(struct be_adapter *adapter)
3372{
10ef9ab4
SP
3373 struct net_device *netdev = adapter->netdev;
3374 struct be_eq_obj *eqo;
3375 int status, i, vec;
6b7c5b94 3376
10ef9ab4
SP
3377 for_all_evt_queues(adapter, eqo, i) {
3378 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3379 vec = be_msix_vec_get(adapter, eqo);
3380 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3381 if (status)
3382 goto err_msix;
d658d98a
PR
3383
3384 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3385 }
b628bde2 3386
6b7c5b94 3387 return 0;
3abcdeda 3388err_msix:
6e3cd5fa
VD
3389 for (i--; i >= 0; i--) {
3390 eqo = &adapter->eq_obj[i];
10ef9ab4 3391 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3392 }
10ef9ab4 3393 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3394 status);
ac6a0c4a 3395 be_msix_disable(adapter);
6b7c5b94
SP
3396 return status;
3397}
3398
3399static int be_irq_register(struct be_adapter *adapter)
3400{
3401 struct net_device *netdev = adapter->netdev;
3402 int status;
3403
ac6a0c4a 3404 if (msix_enabled(adapter)) {
6b7c5b94
SP
3405 status = be_msix_register(adapter);
3406 if (status == 0)
3407 goto done;
ba343c77 3408 /* INTx is not supported for VF */
18c57c74 3409 if (be_virtfn(adapter))
ba343c77 3410 return status;
6b7c5b94
SP
3411 }
3412
e49cc34f 3413 /* INTx: only the first EQ is used */
6b7c5b94
SP
3414 netdev->irq = adapter->pdev->irq;
3415 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3416 &adapter->eq_obj[0]);
6b7c5b94
SP
3417 if (status) {
3418 dev_err(&adapter->pdev->dev,
3419 "INTx request IRQ failed - err %d\n", status);
3420 return status;
3421 }
3422done:
3423 adapter->isr_registered = true;
3424 return 0;
3425}
3426
3427static void be_irq_unregister(struct be_adapter *adapter)
3428{
3429 struct net_device *netdev = adapter->netdev;
10ef9ab4 3430 struct be_eq_obj *eqo;
d658d98a 3431 int i, vec;
6b7c5b94
SP
3432
3433 if (!adapter->isr_registered)
3434 return;
3435
3436 /* INTx */
ac6a0c4a 3437 if (!msix_enabled(adapter)) {
e49cc34f 3438 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3439 goto done;
3440 }
3441
3442 /* MSIx */
d658d98a
PR
3443 for_all_evt_queues(adapter, eqo, i) {
3444 vec = be_msix_vec_get(adapter, eqo);
3445 irq_set_affinity_hint(vec, NULL);
3446 free_irq(vec, eqo);
3447 }
3abcdeda 3448
6b7c5b94
SP
3449done:
3450 adapter->isr_registered = false;
6b7c5b94
SP
3451}
3452
10ef9ab4 3453static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3454{
62219066 3455 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3456 struct be_queue_info *q;
3457 struct be_rx_obj *rxo;
3458 int i;
3459
3460 for_all_rx_queues(adapter, rxo, i) {
3461 q = &rxo->q;
3462 if (q->created) {
99b44304
KA
3463 /* If RXQs are destroyed while in an "out of buffer"
3464 * state, there is a possibility of an HW stall on
3465 * Lancer. So, post 64 buffers to each queue to relieve
3466 * the "out of buffer" condition.
3467 * Make sure there's space in the RXQ before posting.
3468 */
3469 if (lancer_chip(adapter)) {
3470 be_rx_cq_clean(rxo);
3471 if (atomic_read(&q->used) == 0)
3472 be_post_rx_frags(rxo, GFP_KERNEL,
3473 MAX_RX_POST);
3474 }
3475
482c9e79 3476 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3477 be_rx_cq_clean(rxo);
99b44304 3478 be_rxq_clean(rxo);
482c9e79 3479 }
10ef9ab4 3480 be_queue_free(adapter, q);
482c9e79 3481 }
62219066
AK
3482
3483 if (rss->rss_flags) {
3484 rss->rss_flags = RSS_ENABLE_NONE;
3485 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3486 128, rss->rss_hkey);
3487 }
482c9e79
SP
3488}
3489
bcc84140
KA
3490static void be_disable_if_filters(struct be_adapter *adapter)
3491{
6d928ae5
IV
3492 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3493 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
4993b39a 3494 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
6d928ae5 3495 be_dev_mac_del(adapter, adapter->pmac_id[0]);
4993b39a
IV
3496 eth_zero_addr(adapter->dev_mac);
3497 }
6d928ae5 3498
bcc84140 3499 be_clear_uc_list(adapter);
92fbb1df 3500 be_clear_mc_list(adapter);
bcc84140
KA
3501
3502 /* The IFACE flags are enabled in the open path and cleared
3503 * in the close path. When a VF gets detached from the host and
3504 * assigned to a VM the following happens:
3505 * - VF's IFACE flags get cleared in the detach path
3506 * - IFACE create is issued by the VF in the attach path
3507 * Due to a bug in the BE3/Skyhawk-R FW
3508 * (Lancer FW doesn't have the bug), the IFACE capability flags
3509 * specified along with the IFACE create cmd issued by a VF are not
3510 * honoured by FW. As a consequence, if a *new* driver
3511 * (that enables/disables IFACE flags in open/close)
 3512 * is loaded in the host and an *old* driver is used by a VM/VF,
3513 * the IFACE gets created *without* the needed flags.
3514 * To avoid this, disable RX-filter flags only for Lancer.
3515 */
3516 if (lancer_chip(adapter)) {
3517 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3518 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3519 }
3520}
3521
889cd4b2
SP
3522static int be_close(struct net_device *netdev)
3523{
3524 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3525 struct be_eq_obj *eqo;
3526 int i;
889cd4b2 3527
e1ad8e33
KA
3528 /* This protection is needed as be_close() may be called even when the
3529 * adapter is in cleared state (after eeh perm failure)
3530 */
3531 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3532 return 0;
3533
b7172414
SP
3534 /* Before attempting cleanup ensure all the pending cmds in the
3535 * config_wq have finished execution
3536 */
3537 flush_workqueue(be_wq);
3538
bcc84140
KA
3539 be_disable_if_filters(adapter);
3540
dff345c5
IV
3541 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3542 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3543 napi_disable(&eqo->napi);
6384a4d0 3544 }
71237b6f 3545 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3546 }
a323d9bf
SP
3547
3548 be_async_mcc_disable(adapter);
3549
3550 /* Wait for all pending tx completions to arrive so that
3551 * all tx skbs are freed.
3552 */
fba87559 3553 netif_tx_disable(netdev);
6e1f9975 3554 be_tx_compl_clean(adapter);
a323d9bf
SP
3555
3556 be_rx_qs_destroy(adapter);
d11a347d 3557
a323d9bf 3558 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3559 if (msix_enabled(adapter))
3560 synchronize_irq(be_msix_vec_get(adapter, eqo));
3561 else
3562 synchronize_irq(netdev->irq);
3563 be_eq_clean(eqo);
63fcb27f
PR
3564 }
3565
889cd4b2
SP
3566 be_irq_unregister(adapter);
3567
482c9e79
SP
3568 return 0;
3569}
3570
10ef9ab4 3571static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3572{
1dcf7b1c
ED
3573 struct rss_info *rss = &adapter->rss_info;
3574 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3575 struct be_rx_obj *rxo;
e9008ee9 3576 int rc, i, j;
482c9e79
SP
3577
3578 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3579 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3580 sizeof(struct be_eth_rx_d));
3581 if (rc)
3582 return rc;
3583 }
3584
71bb8bd0
VV
3585 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3586 rxo = default_rxo(adapter);
3587 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3588 rx_frag_size, adapter->if_handle,
3589 false, &rxo->rss_id);
3590 if (rc)
3591 return rc;
3592 }
10ef9ab4
SP
3593
3594 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3595 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3596 rx_frag_size, adapter->if_handle,
3597 true, &rxo->rss_id);
482c9e79
SP
3598 if (rc)
3599 return rc;
3600 }
3601
3602 if (be_multi_rxq(adapter)) {
71bb8bd0 3603 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3604 for_all_rss_queues(adapter, rxo, i) {
e2557877 3605 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3606 break;
e2557877
VD
3607 rss->rsstable[j + i] = rxo->rss_id;
3608 rss->rss_queue[j + i] = i;
e9008ee9
PR
3609 }
3610 }
e2557877
VD
3611 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3612 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3613
3614 if (!BEx_chip(adapter))
e2557877
VD
3615 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3616 RSS_ENABLE_UDP_IPV6;
62219066
AK
3617
3618 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3619 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3620 RSS_INDIR_TABLE_LEN, rss_key);
3621 if (rc) {
3622 rss->rss_flags = RSS_ENABLE_NONE;
3623 return rc;
3624 }
3625
3626 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3627 } else {
3628 /* Disable RSS, if only default RX Q is created */
e2557877 3629 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3630 }
594ad54a 3631
e2557877 3632
b02e60c8
SR
3633 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3634 * which is a queue empty condition
3635 */
10ef9ab4 3636 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3637 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3638
889cd4b2
SP
3639 return 0;
3640}
3641
bcc84140
KA
3642static int be_enable_if_filters(struct be_adapter *adapter)
3643{
3644 int status;
3645
c1bb0a55 3646 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
bcc84140
KA
3647 if (status)
3648 return status;
3649
4993b39a
IV
 3650 /* This condition is usually true, as the ->dev_mac is zeroed.
3651 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3652 * subsequent be_dev_mac_add() can fail (after fresh boot)
3653 */
3654 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3655 int old_pmac_id = -1;
3656
3657 /* Remember old programmed MAC if any - can happen on BE3 VF */
3658 if (!is_zero_ether_addr(adapter->dev_mac))
3659 old_pmac_id = adapter->pmac_id[0];
3660
988d44b1 3661 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
bcc84140
KA
3662 if (status)
3663 return status;
4993b39a
IV
3664
3665 /* Delete the old programmed MAC as we successfully programmed
3666 * a new MAC
3667 */
3668 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3669 be_dev_mac_del(adapter, old_pmac_id);
3670
c27ebf58 3671 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
bcc84140
KA
3672 }
3673
3674 if (adapter->vlans_added)
3675 be_vid_config(adapter);
3676
b7172414 3677 __be_set_rx_mode(adapter);
bcc84140
KA
3678
3679 return 0;
3680}
3681
6b7c5b94
SP
3682static int be_open(struct net_device *netdev)
3683{
3684 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3685 struct be_eq_obj *eqo;
3abcdeda 3686 struct be_rx_obj *rxo;
10ef9ab4 3687 struct be_tx_obj *txo;
b236916a 3688 u8 link_status;
3abcdeda 3689 int status, i;
5fb379ee 3690
10ef9ab4 3691 status = be_rx_qs_create(adapter);
482c9e79
SP
3692 if (status)
3693 goto err;
3694
bcc84140
KA
3695 status = be_enable_if_filters(adapter);
3696 if (status)
3697 goto err;
3698
c2bba3df
SK
3699 status = be_irq_register(adapter);
3700 if (status)
3701 goto err;
5fb379ee 3702
10ef9ab4 3703 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3704 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3705
10ef9ab4
SP
3706 for_all_tx_queues(adapter, txo, i)
3707 be_cq_notify(adapter, txo->cq.id, true, 0);
3708
7a1e9b20
SP
3709 be_async_mcc_enable(adapter);
3710
10ef9ab4
SP
3711 for_all_evt_queues(adapter, eqo, i) {
3712 napi_enable(&eqo->napi);
20947770 3713 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3714 }
04d3d624 3715 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3716
323ff71e 3717 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3718 if (!status)
3719 be_link_status_update(adapter, link_status);
3720
fba87559 3721 netif_tx_start_all_queues(netdev);
c9c47142 3722 if (skyhawk_chip(adapter))
bde6b7cd 3723 udp_tunnel_get_rx_info(netdev);
c5abe7c0 3724
889cd4b2
SP
3725 return 0;
3726err:
3727 be_close(adapter->netdev);
3728 return -EIO;
5fb379ee
SP
3729}
3730
f7062ee5
SP
3731static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3732{
3733 u32 addr;
3734
3735 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3736
3737 mac[5] = (u8)(addr & 0xFF);
3738 mac[4] = (u8)((addr >> 8) & 0xFF);
3739 mac[3] = (u8)((addr >> 16) & 0xFF);
3740 /* Use the OUI from the current MAC address */
3741 memcpy(mac, adapter->netdev->dev_addr, 3);
3742}
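/* Illustration (hypothetical hash value): if jhash() over the PF MAC
 * returns 0x00a1b2c3, a PF MAC of 00:90:fa:xx:yy:zz yields the seed
 * 00:90:fa:a1:b2:c3. In be_vf_eth_addr_config() below, the first VF is
 * assigned the seed itself and byte 5 is incremented for each
 * subsequent VF (...:c4, ...:c5, and so on).
 */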
3743
6d87f5c3
AK
3744/*
3745 * Generate a seed MAC address from the PF MAC Address using jhash.
 3746 * MAC addresses for VFs are assigned incrementally starting from the seed.
3747 * These addresses are programmed in the ASIC by the PF and the VF driver
3748 * queries for the MAC address during its probe.
3749 */
4c876616 3750static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3751{
f9449ab7 3752 u32 vf;
3abcdeda 3753 int status = 0;
6d87f5c3 3754 u8 mac[ETH_ALEN];
11ac75ed 3755 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3756
3757 be_vf_eth_addr_generate(adapter, mac);
3758
11ac75ed 3759 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3760 if (BEx_chip(adapter))
590c391d 3761 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3762 vf_cfg->if_handle,
3763 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3764 else
3765 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3766 vf + 1);
590c391d 3767
6d87f5c3
AK
3768 if (status)
3769 dev_err(&adapter->pdev->dev,
748b539a
SP
3770 "Mac address assignment failed for VF %d\n",
3771 vf);
6d87f5c3 3772 else
11ac75ed 3773 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3774
3775 mac[5] += 1;
3776 }
3777 return status;
3778}
3779
4c876616
SP
3780static int be_vfs_mac_query(struct be_adapter *adapter)
3781{
3782 int status, vf;
3783 u8 mac[ETH_ALEN];
3784 struct be_vf_cfg *vf_cfg;
4c876616
SP
3785
3786 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3787 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3788 mac, vf_cfg->if_handle,
3789 false, vf+1);
4c876616
SP
3790 if (status)
3791 return status;
3792 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3793 }
3794 return 0;
3795}
3796
f9449ab7 3797static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3798{
11ac75ed 3799 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3800 u32 vf;
3801
257a3feb 3802 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3803 dev_warn(&adapter->pdev->dev,
3804 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3805 goto done;
3806 }
3807
b4c1df93
SP
3808 pci_disable_sriov(adapter->pdev);
3809
11ac75ed 3810 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3811 if (BEx_chip(adapter))
11ac75ed
SP
3812 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3813 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3814 else
3815 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3816 vf + 1);
f9449ab7 3817
11ac75ed
SP
3818 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3819 }
884476be
SK
3820
3821 if (BE3_chip(adapter))
3822 be_cmd_set_hsw_config(adapter, 0, 0,
3823 adapter->if_handle,
3824 PORT_FWD_TYPE_PASSTHRU, 0);
39f1d94d
SP
3825done:
3826 kfree(adapter->vf_cfg);
3827 adapter->num_vfs = 0;
f174c7ec 3828 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3829}
3830
7707133c
SP
3831static void be_clear_queues(struct be_adapter *adapter)
3832{
3833 be_mcc_queues_destroy(adapter);
3834 be_rx_cqs_destroy(adapter);
3835 be_tx_queues_destroy(adapter);
3836 be_evt_queues_destroy(adapter);
3837}
3838
68d7bdcb 3839static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3840{
191eb756
SP
3841 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3842 cancel_delayed_work_sync(&adapter->work);
3843 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3844 }
68d7bdcb
SP
3845}
3846
eb7dd46c
SP
3847static void be_cancel_err_detection(struct be_adapter *adapter)
3848{
710f3e59
SB
3849 struct be_error_recovery *err_rec = &adapter->error_recovery;
3850
3851 if (!be_err_recovery_workq)
3852 return;
3853
eb7dd46c 3854 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
710f3e59 3855 cancel_delayed_work_sync(&err_rec->err_detection_work);
eb7dd46c
SP
3856 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3857 }
3858}
3859
bf8d9dfb
SB
3860static int be_enable_vxlan_offloads(struct be_adapter *adapter)
3861{
3862 struct net_device *netdev = adapter->netdev;
3863 struct device *dev = &adapter->pdev->dev;
3864 struct be_vxlan_port *vxlan_port;
3865 __be16 port;
3866 int status;
3867
3868 vxlan_port = list_first_entry(&adapter->vxlan_port_list,
3869 struct be_vxlan_port, list);
3870 port = vxlan_port->port;
3871
3872 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3873 OP_CONVERT_NORMAL_TO_TUNNEL);
3874 if (status) {
3875 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3876 return status;
3877 }
3878 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3879
3880 status = be_cmd_set_vxlan_port(adapter, port);
3881 if (status) {
3882 dev_warn(dev, "Failed to add VxLAN port\n");
3883 return status;
3884 }
3885 adapter->vxlan_port = port;
3886
3887 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3888 NETIF_F_TSO | NETIF_F_TSO6 |
3889 NETIF_F_GSO_UDP_TUNNEL;
3890 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3891 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
3892
3893 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
3894 be16_to_cpu(port));
3895 return 0;
3896}
3897
c9c47142
SP
3898static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3899{
630f4b70
SB
3900 struct net_device *netdev = adapter->netdev;
3901
c9c47142
SP
3902 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3903 be_cmd_manage_iface(adapter, adapter->if_handle,
3904 OP_CONVERT_TUNNEL_TO_NORMAL);
3905
3906 if (adapter->vxlan_port)
3907 be_cmd_set_vxlan_port(adapter, 0);
3908
3909 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3910 adapter->vxlan_port = 0;
630f4b70
SB
3911
3912 netdev->hw_enc_features = 0;
3913 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3914 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142
SP
3915}
3916
b9263cbf
SR
3917static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3918 struct be_resources *vft_res)
f2858738
VV
3919{
3920 struct be_resources res = adapter->pool_res;
b9263cbf
SR
3921 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3922 struct be_resources res_mod = {0};
f2858738
VV
3923 u16 num_vf_qs = 1;
3924
de2b1e03
SK
 3925 /* Distribute the queue resources among the PF and its VFs */
3926 if (num_vfs) {
3927 /* Divide the rx queues evenly among the VFs and the PF, capped
3928 * at VF-EQ-count. Any remainder queues belong to the PF.
3929 */
ee9ad280
SB
3930 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3931 res.max_rss_qs / (num_vfs + 1));
f2858738 3932
de2b1e03
SK
3933 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
 3934 * RSS Tables per port. Provide RSS on VFs only if the number of
 3935 * VFs requested is less than its PF Pool's RSS Tables limit.
f2858738 3936 */
de2b1e03 3937 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
f2858738
VV
3938 num_vf_qs = 1;
3939 }
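/* Worked example (hypothetical numbers): with res.max_rss_qs = 16 and
 * num_vfs = 3, each of the 4 functions (PF + 3 VFs) may get
 * 16 / 4 = 4 RSS queues, capped at SH_VF_MAX_NIC_EQS. If num_vfs
 * reaches the PF pool's RSS Tables limit, each VF falls back to a
 * single queue pair (num_vf_qs = 1).
 */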
b9263cbf
SR
3940
3941 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3942 * which are modifiable using SET_PROFILE_CONFIG cmd.
3943 */
de2b1e03
SK
3944 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
3945 RESOURCE_MODIFIABLE, 0);
b9263cbf
SR
3946
3947 /* If RSS IFACE capability flags are modifiable for a VF, set the
3948 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3949 * more than 1 RSSQ is available for a VF.
3950 * Otherwise, provision only 1 queue pair for VF.
3951 */
3952 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3953 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3954 if (num_vf_qs > 1) {
3955 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3956 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3957 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3958 } else {
3959 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3960 BE_IF_FLAGS_DEFQ_RSS);
3961 }
3962 } else {
3963 num_vf_qs = 1;
3964 }
3965
3966 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3967 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3968 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3969 }
3970
3971 vft_res->vf_if_cap_flags = vf_if_cap_flags;
3972 vft_res->max_rx_qs = num_vf_qs;
3973 vft_res->max_rss_qs = num_vf_qs;
3974 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
3975 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
3976
3977 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
 3978 * among the PF and its VFs, if the fields are changeable
3979 */
3980 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3981 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
3982
3983 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3984 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
3985
3986 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3987 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
3988
3989 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3990 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
f2858738
VV
3991}
3992
b7172414
SP
3993static void be_if_destroy(struct be_adapter *adapter)
3994{
3995 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3996
3997 kfree(adapter->pmac_id);
3998 adapter->pmac_id = NULL;
3999
4000 kfree(adapter->mc_list);
4001 adapter->mc_list = NULL;
4002
4003 kfree(adapter->uc_list);
4004 adapter->uc_list = NULL;
4005}
4006
b05004ad
SK
4007static int be_clear(struct be_adapter *adapter)
4008{
f2858738 4009 struct pci_dev *pdev = adapter->pdev;
b9263cbf 4010 struct be_resources vft_res = {0};
f2858738 4011
68d7bdcb 4012 be_cancel_worker(adapter);
191eb756 4013
b7172414
SP
4014 flush_workqueue(be_wq);
4015
11ac75ed 4016 if (sriov_enabled(adapter))
f9449ab7
SP
4017 be_vf_clear(adapter);
4018
bec84e6b
VV
4019 /* Re-configure FW to distribute resources evenly across max-supported
4020 * number of VFs, only when VFs are not already enabled.
4021 */
ace40aff
VV
4022 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4023 !pci_vfs_assigned(pdev)) {
b9263cbf
SR
4024 be_calculate_vf_res(adapter,
4025 pci_sriov_get_totalvfs(pdev),
4026 &vft_res);
bec84e6b 4027 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738 4028 pci_sriov_get_totalvfs(pdev),
b9263cbf 4029 &vft_res);
f2858738 4030 }
bec84e6b 4031
c9c47142 4032 be_disable_vxlan_offloads(adapter);
fbc13f01 4033
b7172414 4034 be_if_destroy(adapter);
a54769f5 4035
7707133c 4036 be_clear_queues(adapter);
a54769f5 4037
10ef9ab4 4038 be_msix_disable(adapter);
e1ad8e33 4039 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
4040 return 0;
4041}
4042
4c876616 4043static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 4044{
92bf14ab 4045 struct be_resources res = {0};
bcc84140 4046 u32 cap_flags, en_flags, vf;
4c876616 4047 struct be_vf_cfg *vf_cfg;
0700d816 4048 int status;
abb93951 4049
0700d816 4050 /* If a FW profile exists, then cap_flags are updated */
c1bb0a55 4051 cap_flags = BE_VF_IF_EN_FLAGS;
abb93951 4052
4c876616 4053 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab 4054 if (!BE3_chip(adapter)) {
de2b1e03
SK
4055 status = be_cmd_get_profile_config(adapter, &res, NULL,
4056 ACTIVE_PROFILE_TYPE,
f2858738 4057 RESOURCE_LIMITS,
92bf14ab 4058 vf + 1);
435452aa 4059 if (!status) {
92bf14ab 4060 cap_flags = res.if_cap_flags;
435452aa
VV
4061 /* Prevent VFs from enabling VLAN promiscuous
4062 * mode
4063 */
4064 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4065 }
92bf14ab 4066 }
4c876616 4067
c1bb0a55
VD
4068 /* PF should enable IF flags during proxy if_create call */
4069 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
bcc84140
KA
4070 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4071 &vf_cfg->if_handle, vf + 1);
4c876616 4072 if (status)
0700d816 4073 return status;
4c876616 4074 }
0700d816
KA
4075
4076 return 0;
abb93951
PR
4077}
4078
39f1d94d 4079static int be_vf_setup_init(struct be_adapter *adapter)
30128031 4080{
11ac75ed 4081 struct be_vf_cfg *vf_cfg;
30128031
SP
4082 int vf;
4083
39f1d94d
SP
4084 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4085 GFP_KERNEL);
4086 if (!adapter->vf_cfg)
4087 return -ENOMEM;
4088
11ac75ed
SP
4089 for_all_vfs(adapter, vf_cfg, vf) {
4090 vf_cfg->if_handle = -1;
4091 vf_cfg->pmac_id = -1;
30128031 4092 }
39f1d94d 4093 return 0;
30128031
SP
4094}
4095
f9449ab7
SP
4096static int be_vf_setup(struct be_adapter *adapter)
4097{
c502224e 4098 struct device *dev = &adapter->pdev->dev;
11ac75ed 4099 struct be_vf_cfg *vf_cfg;
4c876616 4100 int status, old_vfs, vf;
e7bcbd7b 4101 bool spoofchk;
39f1d94d 4102
257a3feb 4103 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
4104
4105 status = be_vf_setup_init(adapter);
4106 if (status)
4107 goto err;
30128031 4108
4c876616
SP
4109 if (old_vfs) {
4110 for_all_vfs(adapter, vf_cfg, vf) {
4111 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4112 if (status)
4113 goto err;
4114 }
f9449ab7 4115
4c876616
SP
4116 status = be_vfs_mac_query(adapter);
4117 if (status)
4118 goto err;
4119 } else {
bec84e6b
VV
4120 status = be_vfs_if_create(adapter);
4121 if (status)
4122 goto err;
4123
39f1d94d
SP
4124 status = be_vf_eth_addr_config(adapter);
4125 if (status)
4126 goto err;
4127 }
f9449ab7 4128
11ac75ed 4129 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 4130 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
4131 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4132 vf + 1);
4133 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 4134 status = be_cmd_set_fn_privileges(adapter,
435452aa 4135 vf_cfg->privileges |
04a06028
SP
4136 BE_PRIV_FILTMGMT,
4137 vf + 1);
435452aa
VV
4138 if (!status) {
4139 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
4140 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4141 vf);
435452aa 4142 }
04a06028
SP
4143 }
4144
0f77ba73
RN
4145 /* Allow full available bandwidth */
4146 if (!old_vfs)
4147 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 4148
e7bcbd7b
KA
4149 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4150 vf_cfg->if_handle, NULL,
4151 &spoofchk);
4152 if (!status)
4153 vf_cfg->spoofchk = spoofchk;
4154
bdce2ad7 4155 if (!old_vfs) {
0599863d 4156 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
4157 be_cmd_set_logical_link_config(adapter,
4158 IFLA_VF_LINK_STATE_AUTO,
4159 vf+1);
4160 }
f9449ab7 4161 }
b4c1df93
SP
4162
4163 if (!old_vfs) {
4164 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4165 if (status) {
4166 dev_err(dev, "SRIOV enable failed\n");
4167 adapter->num_vfs = 0;
4168 goto err;
4169 }
4170 }
f174c7ec 4171
884476be
SK
4172 if (BE3_chip(adapter)) {
4173 /* On BE3, enable VEB only when SRIOV is enabled */
4174 status = be_cmd_set_hsw_config(adapter, 0, 0,
4175 adapter->if_handle,
4176 PORT_FWD_TYPE_VEB, 0);
4177 if (status)
4178 goto err;
4179 }
4180
f174c7ec 4181 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
4182 return 0;
4183err:
4c876616
SP
4184 dev_err(dev, "VF setup failed\n");
4185 be_vf_clear(adapter);
f9449ab7
SP
4186 return status;
4187}
4188
f93f160b
VV
4189/* Converting function_mode bits on BE3 to SH mc_type enums */
4190
4191static u8 be_convert_mc_type(u32 function_mode)
4192{
66064dbc 4193 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4194 return vNIC1;
66064dbc 4195 else if (function_mode & QNQ_MODE)
f93f160b
VV
4196 return FLEX10;
4197 else if (function_mode & VNIC_MODE)
4198 return vNIC2;
4199 else if (function_mode & UMC_ENABLED)
4200 return UMC;
4201 else
4202 return MC_NONE;
4203}
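/* Decoding precedence implied by the checks above:
 * VNIC_MODE + QNQ_MODE -> vNIC1, QNQ_MODE alone -> FLEX10,
 * VNIC_MODE alone -> vNIC2, UMC_ENABLED -> UMC, otherwise MC_NONE.
 */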
4204
92bf14ab
SP
4205/* On BE2/BE3 FW does not suggest the supported limits */
4206static void BEx_get_resources(struct be_adapter *adapter,
4207 struct be_resources *res)
4208{
bec84e6b 4209 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4210
4211 if (be_physfn(adapter))
4212 res->max_uc_mac = BE_UC_PMAC_COUNT;
4213 else
4214 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4215
f93f160b
VV
4216 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4217
4218 if (be_is_mc(adapter)) {
4219 /* Assuming that there are 4 channels per port,
4220 * when multi-channel is enabled
4221 */
4222 if (be_is_qnq_mode(adapter))
4223 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4224 else
4225 /* In a non-qnq multichannel mode, the pvid
4226 * takes up one vlan entry
4227 */
4228 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4229 } else {
92bf14ab 4230 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4231 }
4232
92bf14ab
SP
4233 res->max_mcast_mac = BE_MAX_MC;
4234
a5243dab
VV
4235 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4236 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4237 * *only* if it is RSS-capable.
4238 */
4239 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4240 be_virtfn(adapter) ||
4241 (be_is_mc(adapter) &&
4242 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4243 res->max_tx_qs = 1;
a28277dc
SR
4244 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4245 struct be_resources super_nic_res = {0};
4246
4247 /* On a SuperNIC profile, the driver needs to use the
4248 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4249 */
de2b1e03
SK
4250 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4251 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4252 0);
a28277dc
SR
4253 /* Some old versions of BE3 FW don't report max_tx_qs value */
4254 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4255 } else {
92bf14ab 4256 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4257 }
92bf14ab
SP
4258
4259 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4260 !use_sriov && be_physfn(adapter))
4261 res->max_rss_qs = (adapter->be3_native) ?
4262 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4263 res->max_rx_qs = res->max_rss_qs + 1;
4264
e3dc867c 4265 if (be_physfn(adapter))
d3518e21 4266 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4267 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4268 else
4269 res->max_evt_qs = 1;
92bf14ab
SP
4270
4271 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4272 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4273 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4274 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4275}
4276
30128031
SP
4277static void be_setup_init(struct be_adapter *adapter)
4278{
4279 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4280 adapter->phy.link_speed = -1;
30128031
SP
4281 adapter->if_handle = -1;
4282 adapter->be3_native = false;
f66b7cfd 4283 adapter->if_flags = 0;
51d1f98a 4284 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
f25b119c
PR
4285 if (be_physfn(adapter))
4286 adapter->cmd_privileges = MAX_PRIVILEGES;
4287 else
4288 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4289}
4290
de2b1e03
SK
4291/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4292 * However, this HW limitation is not exposed to the host via any SLI cmd.
4293 * As a result, in the case of SRIOV and in particular multi-partition configs
 4294 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 4295 * for distribution between the VFs. This self-imposed limit determines the
 4296 * number of VFs for which RSS can be enabled.
4297 */
d766e7e6 4298static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
de2b1e03
SK
4299{
4300 struct be_port_resources port_res = {0};
4301 u8 rss_tables_on_port;
4302 u16 max_vfs = be_max_vfs(adapter);
4303
4304 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4305 RESOURCE_LIMITS, 0);
4306
4307 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4308
4309 /* Each PF Pool's RSS Tables limit =
4310 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4311 */
4312 adapter->pool_res.max_rss_tables =
4313 max_vfs * rss_tables_on_port / port_res.max_vfs;
4314}
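/* Worked example (hypothetical numbers): with MAX_PORT_RSS_TABLES = 15,
 * port_res.nic_pfs = 1, port_res.max_vfs = 64 and be_max_vfs() = 32,
 * rss_tables_on_port = 15 - 1 = 14 and this PF pool's share is
 * 32 * 14 / 64 = 7 RSS Tables for distribution among its VFs.
 */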
4315
bec84e6b
VV
4316static int be_get_sriov_config(struct be_adapter *adapter)
4317{
bec84e6b 4318 struct be_resources res = {0};
d3d18312 4319 int max_vfs, old_vfs;
bec84e6b 4320
de2b1e03
SK
4321 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4322 RESOURCE_LIMITS, 0);
d3d18312 4323
ace40aff 4324 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4325 if (BE3_chip(adapter) && !res.max_vfs) {
4326 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4327 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4328 }
4329
d3d18312 4330 adapter->pool_res = res;
bec84e6b 4331
ace40aff
VV
 4332 /* If, during a previous unload of the driver, the VFs were not disabled,
4333 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4334 * Instead use the TotalVFs value stored in the pci-dev struct.
4335 */
bec84e6b
VV
4336 old_vfs = pci_num_vf(adapter->pdev);
4337 if (old_vfs) {
ace40aff
VV
4338 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4339 old_vfs);
4340
4341 adapter->pool_res.max_vfs =
4342 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4343 adapter->num_vfs = old_vfs;
bec84e6b
VV
4344 }
4345
de2b1e03
SK
4346 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4347 be_calculate_pf_pool_rss_tables(adapter);
4348 dev_info(&adapter->pdev->dev,
4349 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4350 be_max_pf_pool_rss_tables(adapter));
4351 }
bec84e6b
VV
4352 return 0;
4353}
4354
ace40aff
VV
4355static void be_alloc_sriov_res(struct be_adapter *adapter)
4356{
4357 int old_vfs = pci_num_vf(adapter->pdev);
b9263cbf 4358 struct be_resources vft_res = {0};
ace40aff
VV
4359 int status;
4360
4361 be_get_sriov_config(adapter);
4362
4363 if (!old_vfs)
4364 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4365
4366 /* When the HW is in SRIOV capable configuration, the PF-pool
4367 * resources are given to PF during driver load, if there are no
4368 * old VFs. This facility is not available in BE3 FW.
4369 * Also, this is done by FW in Lancer chip.
4370 */
4371 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
b9263cbf 4372 be_calculate_vf_res(adapter, 0, &vft_res);
ace40aff 4373 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
b9263cbf 4374 &vft_res);
ace40aff
VV
4375 if (status)
4376 dev_err(&adapter->pdev->dev,
4377 "Failed to optimize SRIOV resources\n");
4378 }
4379}
4380
92bf14ab 4381static int be_get_resources(struct be_adapter *adapter)
abb93951 4382{
92bf14ab
SP
4383 struct device *dev = &adapter->pdev->dev;
4384 struct be_resources res = {0};
4385 int status;
abb93951 4386
92bf14ab
SP
 4387 /* For Lancer, SH etc., read per-function resource limits from FW.
 4388 * GET_FUNC_CONFIG returns per-function guaranteed limits.
 4389 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits.
4390 */
ce7faf0a
SP
4391 if (BEx_chip(adapter)) {
4392 BEx_get_resources(adapter, &res);
4393 } else {
92bf14ab
SP
4394 status = be_cmd_get_func_config(adapter, &res);
4395 if (status)
4396 return status;
abb93951 4397
71bb8bd0
VV
 4398 /* If a default RXQ must be created, we'll use up one RSSQ */
4399 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4400 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4401 res.max_rss_qs -= 1;
abb93951 4402 }
4c876616 4403
ce7faf0a
SP
4404 /* If RoCE is supported stash away half the EQs for RoCE */
4405 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4406 res.max_evt_qs / 2 : res.max_evt_qs;
4407 adapter->res = res;
4408
71bb8bd0
VV
4409 /* If FW supports RSS default queue, then skip creating non-RSS
4410 * queue for non-IP traffic.
4411 */
4412 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4413 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4414
acbafeb1
SP
4415 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4416 be_max_txqs(adapter), be_max_rxqs(adapter),
ce7faf0a 4417 be_max_rss(adapter), be_max_nic_eqs(adapter),
acbafeb1
SP
4418 be_max_vfs(adapter));
4419 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4420 be_max_uc(adapter), be_max_mc(adapter),
4421 be_max_vlans(adapter));
4422
e261768e
SP
4423 /* Ensure RX and TX queues are created in pairs at init time */
4424 adapter->cfg_num_rx_irqs =
4425 min_t(u16, netif_get_num_default_rss_queues(),
4426 be_max_qp_irqs(adapter));
4427 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
92bf14ab 4428 return 0;
abb93951
PR
4429}
4430
39f1d94d
SP
4431static int be_get_config(struct be_adapter *adapter)
4432{
6b085ba9 4433 int status, level;
542963b7 4434 u16 profile_id;
6b085ba9 4435
980df249
SR
4436 status = be_cmd_get_cntl_attributes(adapter);
4437 if (status)
4438 return status;
4439
e97e3cda 4440 status = be_cmd_query_fw_cfg(adapter);
abb93951 4441 if (status)
92bf14ab 4442 return status;
abb93951 4443
fd7ff6f0
VD
4444 if (!lancer_chip(adapter) && be_physfn(adapter))
4445 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4446
6b085ba9
SP
4447 if (BEx_chip(adapter)) {
4448 level = be_cmd_get_fw_log_level(adapter);
4449 adapter->msg_enable =
4450 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4451 }
4452
4453 be_cmd_get_acpi_wol_cap(adapter);
45f13df7
SB
4454 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4455 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
6b085ba9 4456
21252377
VV
4457 be_cmd_query_port_name(adapter);
4458
4459 if (be_physfn(adapter)) {
542963b7
VV
4460 status = be_cmd_get_active_profile(adapter, &profile_id);
4461 if (!status)
4462 dev_info(&adapter->pdev->dev,
4463 "Using profile 0x%x\n", profile_id);
962bcb75 4464 }
bec84e6b 4465
92bf14ab 4466 return 0;
39f1d94d
SP
4467}
4468
95046b92
SP
4469static int be_mac_setup(struct be_adapter *adapter)
4470{
4471 u8 mac[ETH_ALEN];
4472 int status;
4473
4474 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4475 status = be_cmd_get_perm_mac(adapter, mac);
4476 if (status)
4477 return status;
4478
4479 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4480 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4993b39a
IV
4481
4482 /* Initial MAC for BE3 VFs is already programmed by PF */
4483 if (BEx_chip(adapter) && be_virtfn(adapter))
4484 memcpy(adapter->dev_mac, mac, ETH_ALEN);
95046b92
SP
4485 }
4486
95046b92
SP
4487 return 0;
4488}
4489
68d7bdcb
SP
4490static void be_schedule_worker(struct be_adapter *adapter)
4491{
b7172414 4492 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
68d7bdcb
SP
4493 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4494}
4495
710f3e59
SB
4496static void be_destroy_err_recovery_workq(void)
4497{
4498 if (!be_err_recovery_workq)
4499 return;
4500
4501 flush_workqueue(be_err_recovery_workq);
4502 destroy_workqueue(be_err_recovery_workq);
4503 be_err_recovery_workq = NULL;
4504}
4505
972f37b4 4506static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c 4507{
710f3e59
SB
4508 struct be_error_recovery *err_rec = &adapter->error_recovery;
4509
4510 if (!be_err_recovery_workq)
4511 return;
4512
4513 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4514 msecs_to_jiffies(delay));
eb7dd46c
SP
4515 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4516}
4517
7707133c 4518static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4519{
68d7bdcb 4520 struct net_device *netdev = adapter->netdev;
10ef9ab4 4521 int status;
ba343c77 4522
7707133c 4523 status = be_evt_queues_create(adapter);
abb93951
PR
4524 if (status)
4525 goto err;
73d540f2 4526
7707133c 4527 status = be_tx_qs_create(adapter);
c2bba3df
SK
4528 if (status)
4529 goto err;
10ef9ab4 4530
7707133c 4531 status = be_rx_cqs_create(adapter);
10ef9ab4 4532 if (status)
a54769f5 4533 goto err;
6b7c5b94 4534
7707133c 4535 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4536 if (status)
4537 goto err;
4538
68d7bdcb
SP
4539 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4540 if (status)
4541 goto err;
4542
4543 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4544 if (status)
4545 goto err;
4546
7707133c
SP
4547 return 0;
4548err:
4549 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4550 return status;
4551}
4552
62219066
AK
4553static int be_if_create(struct be_adapter *adapter)
4554{
4555 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4556 u32 cap_flags = be_if_cap_flags(adapter);
4557 int status;
4558
b7172414
SP
4559 /* alloc required memory for other filtering fields */
4560 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4561 sizeof(*adapter->pmac_id), GFP_KERNEL);
4562 if (!adapter->pmac_id)
4563 return -ENOMEM;
4564
4565 adapter->mc_list = kcalloc(be_max_mc(adapter),
4566 sizeof(*adapter->mc_list), GFP_KERNEL);
4567 if (!adapter->mc_list)
4568 return -ENOMEM;
4569
4570 adapter->uc_list = kcalloc(be_max_uc(adapter),
4571 sizeof(*adapter->uc_list), GFP_KERNEL);
4572 if (!adapter->uc_list)
4573 return -ENOMEM;
4574
e261768e 4575 if (adapter->cfg_num_rx_irqs == 1)
62219066
AK
4576 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4577
4578 en_flags &= cap_flags;
4579 /* will enable all the needed filter flags in be_open() */
4580 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4581 &adapter->if_handle, 0);
4582
b7172414
SP
4583 if (status)
4584 return status;
4585
4586 return 0;
62219066
AK
4587}
4588
68d7bdcb
SP
4589int be_update_queues(struct be_adapter *adapter)
4590{
4591 struct net_device *netdev = adapter->netdev;
4592 int status;
4593
4594 if (netif_running(netdev))
4595 be_close(netdev);
4596
4597 be_cancel_worker(adapter);
4598
4599 /* If any vectors have been shared with RoCE we cannot re-program
4600 * the MSIx table.
4601 */
4602 if (!adapter->num_msix_roce_vec)
4603 be_msix_disable(adapter);
4604
4605 be_clear_queues(adapter);
62219066
AK
4606 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4607 if (status)
4608 return status;
68d7bdcb
SP
4609
4610 if (!msix_enabled(adapter)) {
4611 status = be_msix_enable(adapter);
4612 if (status)
4613 return status;
4614 }
4615
62219066
AK
4616 status = be_if_create(adapter);
4617 if (status)
4618 return status;
4619
68d7bdcb
SP
4620 status = be_setup_queues(adapter);
4621 if (status)
4622 return status;
4623
4624 be_schedule_worker(adapter);
4625
4626 if (netif_running(netdev))
4627 status = be_open(netdev);
4628
4629 return status;
4630}
4631
f7062ee5
SP
4632static inline int fw_major_num(const char *fw_ver)
4633{
4634 int fw_major = 0, i;
4635
4636 i = sscanf(fw_ver, "%d.", &fw_major);
4637 if (i != 1)
4638 return 0;
4639
4640 return fw_major;
4641}
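/* e.g. fw_major_num("4.9.416.0") returns 4; a string that sscanf()
 * cannot parse as "%d." returns 0.
 */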
4642
710f3e59
SB
4643/* If it is error recovery, FLR the PF
4644 * Else if any VFs are already enabled don't FLR the PF
4645 */
f962f840
SP
4646static bool be_reset_required(struct be_adapter *adapter)
4647{
710f3e59
SB
4648 if (be_error_recovering(adapter))
4649 return true;
4650 else
4651 return pci_num_vf(adapter->pdev) == 0;
f962f840
SP
4652}
4653
4654/* Wait for the FW to be ready and perform the required initialization */
4655static int be_func_init(struct be_adapter *adapter)
4656{
4657 int status;
4658
4659 status = be_fw_wait_ready(adapter);
4660 if (status)
4661 return status;
4662
710f3e59
SB
4663 /* FW is now ready; clear errors to allow cmds/doorbell */
4664 be_clear_error(adapter, BE_CLEAR_ALL);
4665
f962f840
SP
4666 if (be_reset_required(adapter)) {
4667 status = be_cmd_reset_function(adapter);
4668 if (status)
4669 return status;
4670
4671 /* Wait for interrupts to quiesce after an FLR */
4672 msleep(100);
f962f840
SP
4673 }
4674
4675 /* Tell FW we're ready to fire cmds */
4676 status = be_cmd_fw_init(adapter);
4677 if (status)
4678 return status;
4679
4680 /* Allow interrupts for other ULPs running on NIC function */
4681 be_intr_set(adapter, true);
4682
4683 return 0;
4684}
4685
7707133c
SP
4686static int be_setup(struct be_adapter *adapter)
4687{
4688 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4689 int status;
4690
f962f840
SP
4691 status = be_func_init(adapter);
4692 if (status)
4693 return status;
4694
7707133c
SP
4695 be_setup_init(adapter);
4696
4697 if (!lancer_chip(adapter))
4698 be_cmd_req_native_mode(adapter);
4699
980df249
SR
4700 /* invoke this cmd first to get pf_num and vf_num which are needed
4701 * for issuing profile related cmds
4702 */
4703 if (!BEx_chip(adapter)) {
4704 status = be_cmd_get_func_config(adapter, NULL);
4705 if (status)
4706 return status;
4707 }
72ef3a88 4708
de2b1e03
SK
4709 status = be_get_config(adapter);
4710 if (status)
4711 goto err;
4712
ace40aff
VV
4713 if (!BE2_chip(adapter) && be_physfn(adapter))
4714 be_alloc_sriov_res(adapter);
4715
de2b1e03 4716 status = be_get_resources(adapter);
10ef9ab4 4717 if (status)
a54769f5 4718 goto err;
6b7c5b94 4719
7707133c 4720 status = be_msix_enable(adapter);
10ef9ab4 4721 if (status)
a54769f5 4722 goto err;
6b7c5b94 4723
bcc84140 4724 /* will enable all the needed filter flags in be_open() */
62219066 4725 status = be_if_create(adapter);
7707133c 4726 if (status)
a54769f5 4727 goto err;
6b7c5b94 4728
68d7bdcb
SP
4729 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4730 rtnl_lock();
7707133c 4731 status = be_setup_queues(adapter);
68d7bdcb 4732 rtnl_unlock();
95046b92 4733 if (status)
1578e777
PR
4734 goto err;
4735
7707133c 4736 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4737
4738 status = be_mac_setup(adapter);
10ef9ab4
SP
4739 if (status)
4740 goto err;
4741
e97e3cda 4742 be_cmd_get_fw_ver(adapter);
acbafeb1 4743 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4744
e9e2a904 4745 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4746 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4747 adapter->fw_ver);
4748 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4749 }
4750
00d594c3
KA
4751 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4752 adapter->rx_fc);
4753 if (status)
4754 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4755 &adapter->rx_fc);
590c391d 4756
00d594c3
KA
4757 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4758 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4759
bdce2ad7
SR
4760 if (be_physfn(adapter))
4761 be_cmd_set_logical_link_config(adapter,
4762 IFLA_VF_LINK_STATE_AUTO, 0);
4763
884476be
SK
4764 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4765 * confusing a linux bridge or OVS that it might be connected to.
4766 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4767 * when SRIOV is not enabled.
4768 */
4769 if (BE3_chip(adapter))
4770 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4771 PORT_FWD_TYPE_PASSTHRU, 0);
4772
bec84e6b
VV
4773 if (adapter->num_vfs)
4774 be_vf_setup(adapter);
f9449ab7 4775
f25b119c
PR
4776 status = be_cmd_get_phy_info(adapter);
4777 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4778 adapter->phy.fc_autoneg = 1;
4779
710f3e59
SB
4780 if (be_physfn(adapter) && !lancer_chip(adapter))
4781 be_cmd_set_features(adapter);
4782
68d7bdcb 4783 be_schedule_worker(adapter);
e1ad8e33 4784 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4785 return 0;
a54769f5
SP
4786err:
4787 be_clear(adapter);
4788 return status;
4789}
6b7c5b94 4790
66268739
IV
4791#ifdef CONFIG_NET_POLL_CONTROLLER
4792static void be_netpoll(struct net_device *netdev)
4793{
4794 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4795 struct be_eq_obj *eqo;
66268739
IV
4796 int i;
4797
e49cc34f 4798 for_all_evt_queues(adapter, eqo, i) {
20947770 4799 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4800 napi_schedule(&eqo->napi);
4801 }
66268739
IV
4802}
4803#endif
4804
485bf569
SN
4805int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4806{
4807 const struct firmware *fw;
4808 int status;
4809
4810 if (!netif_running(adapter->netdev)) {
4811 dev_err(&adapter->pdev->dev,
4812 "Firmware load not allowed (interface is down)\n");
940a3fcd 4813 return -ENETDOWN;
485bf569
SN
4814 }
4815
4816 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4817 if (status)
4818 goto fw_exit;
4819
4820 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4821
4822 if (lancer_chip(adapter))
4823 status = lancer_fw_download(adapter, fw);
4824 else
4825 status = be_fw_download(adapter, fw);
4826
eeb65ced 4827 if (!status)
e97e3cda 4828 be_cmd_get_fw_ver(adapter);
eeb65ced 4829
84517482
AK
4830fw_exit:
4831 release_firmware(fw);
4832 return status;
4833}
4834
add511b3
RP
4835static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4836 u16 flags)
a77dcb8c
AK
4837{
4838 struct be_adapter *adapter = netdev_priv(dev);
4839 struct nlattr *attr, *br_spec;
4840 int rem;
4841 int status = 0;
4842 u16 mode = 0;
4843
4844 if (!sriov_enabled(adapter))
4845 return -EOPNOTSUPP;
4846
4847 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4848 if (!br_spec)
4849 return -EINVAL;
a77dcb8c
AK
4850
4851 nla_for_each_nested(attr, br_spec, rem) {
4852 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4853 continue;
4854
b7c1a314
TG
4855 if (nla_len(attr) < sizeof(mode))
4856 return -EINVAL;
4857
a77dcb8c 4858 mode = nla_get_u16(attr);
ac0f5fba
SR
4859 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4860 return -EOPNOTSUPP;
4861
a77dcb8c
AK
4862 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4863 return -EINVAL;
4864
4865 status = be_cmd_set_hsw_config(adapter, 0, 0,
4866 adapter->if_handle,
4867 mode == BRIDGE_MODE_VEPA ?
4868 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4869 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4870 if (status)
4871 goto err;
4872
4873 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4874 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4875
4876 return status;
4877 }
4878err:
4879 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4880 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4881
4882 return status;
4883}
4884
4885static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4886 struct net_device *dev, u32 filter_mask,
4887 int nlflags)
a77dcb8c
AK
4888{
4889 struct be_adapter *adapter = netdev_priv(dev);
4890 int status = 0;
4891 u8 hsw_mode;
4892
a77dcb8c
AK
4893 /* BE and Lancer chips support VEB mode only */
4894 if (BEx_chip(adapter) || lancer_chip(adapter)) {
8431706b
IV
4895 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4896 if (!pci_sriov_get_totalvfs(adapter->pdev))
4897 return 0;
a77dcb8c
AK
4898 hsw_mode = PORT_FWD_TYPE_VEB;
4899 } else {
4900 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4901 adapter->if_handle, &hsw_mode,
4902 NULL);
a77dcb8c
AK
4903 if (status)
4904 return 0;
ff9ed19d
KP
4905
4906 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4907 return 0;
a77dcb8c
AK
4908 }
4909
4910 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4911 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4912 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4913 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4914}
4915
b7172414
SP
4916static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4917 void (*func)(struct work_struct *))
4918{
4919 struct be_cmd_work *work;
4920
4921 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4922 if (!work) {
4923 dev_err(&adapter->pdev->dev,
4924 "be_work memory allocation failed\n");
4925 return NULL;
4926 }
4927
4928 INIT_WORK(&work->work, func);
4929 work->adapter = adapter;
4930 return work;
4931}
4932
630f4b70
SB
4933/* VxLAN offload Notes:
4934 *
4935 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4936 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4937 * is expected to work across all types of IP tunnels once exported. Skyhawk
4938 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4939 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4940 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4941 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4942 *
4943 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
bf8d9dfb
SB
4944 * adds more than one port, disable offloads and re-enable them again when
4945 * there's only one port left. We maintain a list of ports for this purpose.
630f4b70 4946 */
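/* Example sequence (illustrative): adding port 4789 enables offloads;
 * adding a second port 8472 disables them (only one dport supported);
 * deleting 8472 leaves one port, so offloads are re-enabled for 4789.
 * Re-adding an already-known port only bumps its alias count.
 */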
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
		container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;
	int status;

	/* Bump up the alias count if it is an existing port */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			vxlan_port->port_aliases++;
			goto done;
		}
	}

	/* Add a new port to our list. We don't need a lock here since port
	 * add/delete are done only in the context of a single-threaded work
	 * queue (be_wq).
	 */
	vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
	if (!vxlan_port)
		goto done;

	vxlan_port->port = port;
	INIT_LIST_HEAD(&vxlan_port->list);
	list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
	adapter->vxlan_port_count++;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		goto err;
	}

	if (adapter->vxlan_port_count > 1)
		goto done;

	status = be_enable_vxlan_offloads(adapter);
	if (!status)
		goto done;

err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}

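/* Deferred work to delete a VxLAN port: drop an alias reference first,
 * and only remove the port (and adjust offload state) when the last
 * alias is gone.
 */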
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
		container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;
	struct be_vxlan_port *vxlan_port;

	/* Nothing to be done if a port alias is being deleted */
	list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
		if (vxlan_port->port == port) {
			if (vxlan_port->port_aliases) {
				vxlan_port->port_aliases--;
				goto done;
			}
			break;
		}
	}

	/* No port aliases left; delete the port from the list */
	list_del(&vxlan_port->list);
	adapter->vxlan_port_count--;

	/* Disable VxLAN offload if this is the offloaded port */
	if (adapter->vxlan_port == vxlan_port->port) {
		WARN_ON(adapter->vxlan_port_count);
		be_disable_vxlan_offloads(adapter);
		dev_info(&adapter->pdev->dev,
			 "Disabled VxLAN offloads for UDP port %d\n",
			 be16_to_cpu(port));
		goto out;
	}

	/* If only 1 port is left, re-enable VxLAN offload */
	if (adapter->vxlan_port_count == 1)
		be_enable_vxlan_offloads(adapter);

out:
	kfree(vxlan_port);
done:
	kfree(cmd_work);
}

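/* Common helper for the udp_tunnel add/del entry points: filter out
 * non-VxLAN tunnels and chips without VxLAN offload support, then queue
 * the given handler on be_wq.
 */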
static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

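/* ndo_features_check() handler: drop checksum/GSO offload features for
 * encapsulated packets that the hardware cannot offload.
 */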
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE to work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

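/* Build the physical port ID from the 1-based HBA port number followed
 * by the controller serial-number words.
 */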
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

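/* ndo_set_rx_mode() may be called in atomic context, so defer the FW
 * cmds that program the RX filters to the be_wq workqueue.
 */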
static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

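/* Set up netdev feature flags, ops, ethtool ops and MTU limits */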
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}

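/* Quiesce the interface (detach and close the netdev) and tear down
 * adapter resources; used by suspend, EEH and error recovery paths.
 */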
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

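/* Initiate a chip soft reset by setting the SR bit in the SLIPORT
 * soft-reset register.
 */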
static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

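/* Decide whether a HW error is worth recovering from: the POST stage
 * must report a recoverable error code, enough time must have elapsed
 * since driver load and since the last recovery, and the same error
 * must not repeat back-to-back.
 */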
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time && time_before_eq(
		jiffies - err_rec->last_recovery_time, recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

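/* BEx/Skyhawk TPE recovery state machine. Returns 0 once the adapter
 * is ready to be re-initialized; -EAGAIN while a transition is still
 * pending, with resched_delay telling the caller when to run again.
 */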
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria are met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

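/* Attempt adapter recovery: run the TPE state machine on BEx/Skyhawk,
 * wait for the FW to become ready, then tear down and re-create the
 * function's resources.
 */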
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

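/* Periodic error-detection worker: probe for HW errors and, when one
 * is found, drive recovery and reschedule itself with a delay that
 * matches the current recovery state.
 */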
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
		container_of(work, struct be_error_recovery,
			     err_detection_work.work);
	struct be_adapter *adapter =
		container_of(err_rec, struct be_adapter,
			     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if the PF has allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

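/* Periodic (1 sec) housekeeping: die-temperature query, MCC reaping,
 * stats refresh, RX replenish, EQ-delay update and SFP logging.
 */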
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

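/* Doorbell BAR: BAR 0 on Lancer and on VFs, BAR 4 otherwise */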
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

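/* Map the CSR, doorbell and PCICFG BARs needed for this chip/function
 * combination; anything mapped so far is unwound on failure.
 */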
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	INIT_LIST_HEAD(&adapter->vxlan_port_list);
	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

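/* PCI probe: map BARs, allocate driver state, bring up the function
 * and register the net device.
 */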
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

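/* PCI EEH handlers: quiesce the function when an error is detected,
 * reset the slot once the FW is ready again, and restore the function
 * on resume.
 */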
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

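/* .sriov_configure handler (typically reached via the sriov_numvfs
 * sysfs attribute): enable/disable VFs and redistribute PF-pool
 * resources across the requested number of VFs.
 */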
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to a greater number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	int status;

	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	be_wq = create_singlethread_workqueue("be_wq");
	if (!be_wq) {
		pr_warn(DRV_NAME " : workqueue creation failed\n");
		return -1;
	}

	be_err_recovery_workq =
		create_singlethread_workqueue("be_err_recover");
	if (!be_err_recovery_workq)
		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");

	status = pci_register_driver(&be_driver);
	if (status) {
		destroy_workqueue(be_wq);
		be_destroy_err_recovery_workq();
	}
	return status;
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);