/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
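
/* Example of the sizing above (illustrative, values not from the driver):
 * be_queue_alloc() with len = 256 and entry_size = 16 reserves
 * mem->size = 4096 bytes of zeroed, DMA-coherent memory backing the entire
 * ring in a single allocation.
 */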

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
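
/* Illustration of the doorbell encoding above (field positions come from
 * the driver headers and are not spelled out here): be_eq_notify(adapter,
 * 3, true, false, 2, 0) builds
 *	val = 3				  (EQ ring id)
 *	    | 1 << DB_EQ_REARM_SHIFT	  (re-arm the EQ)
 *	    | 1 << DB_EQ_EVNT_SHIFT	  (doorbell targets an event queue)
 *	    | 2 << DB_EQ_NUM_POPPED_SHIFT (2 entries consumed)
 * and writes it to the EQ doorbell register in one 32-bit store.
 */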

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
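
/* Worked example (illustrative): with *acc == 0x0001FFF0 and a new HW
 * reading val == 0x0005, the 16-bit counter has wrapped (0x0005 < 0xFFF0),
 * so newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005.  The low 16 bits
 * mirror the HW counter while the high 16 bits count the wrap-arounds.
 */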

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
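
/* Note on the fetch/retry loops above (standard u64_stats semantics, not
 * driver-specific): u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq()
 * form a seqcount read section, so if the datapath updates the counters
 * mid-read, the do/while simply retries until a consistent pkts/bytes
 * snapshot is obtained without taking any lock.
 */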

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
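
/* Example (illustrative): an skb with linear data and two paged fragments
 * needs 1 (header WRB) + 1 (linear buffer) + 2 (frags) = 4 WRBs; a purely
 * paged skb with no headlen would skip the middle term.
 */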

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}
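
/* Example (illustrative): for a DMA address of 0x0000001234567890 the WRB
 * carries frag_pa_hi = cpu_to_le32(0x12) and frag_pa_lo =
 * cpu_to_le32(0x34567890); unmap_tx_frag() later reassembles the same
 * 64-bit address from these two halves.
 */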

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}
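
/* Worked example (illustrative): vlan_tag = 0x6005 encodes priority 3 in
 * bits 15:13 and VID 5.  If bit 3 of adapter->vlan_prio_bmap is clear, the
 * priority bits are replaced with adapter->recommended_prio_bits while the
 * VID is preserved, e.g. 0x6005 becomes 0x2005 for a recommended priority
 * of 1.
 */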

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
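
/* Illustration of the QnQ double-tag path above (values made up): with
 * pvid = 10 and qnq_vid = 100, an untagged skb first has an inner 802.1Q
 * tag with VID 10 inserted into its payload, then an outer tag with
 * VID 100; VLAN_SKIP_HW then tells the NIC not to add a third tag of its
 * own.
 */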

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
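
/* Reading aid (restates the check above): a packet is flagged when it is
 * IPv6, its next header is neither TCP nor UDP, and the second byte of the
 * first extension header (hdrlen) is 0xff.  Such packets can trigger the
 * BE3 HW VLAN-tagging lockup handled in be_lancer_xmit_workarounds().
 */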

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
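
/* Example of the dummy-WRB padding above (illustrative): on a BEx chip
 * with pend_wrb_cnt = 3, one all-zero WRB is appended so an even count of
 * 4 is rung in the doorbell, and the num_wrb field of the last packet's
 * header WRB is bumped from last_req_wrb_cnt to last_req_wrb_cnt + 1 to
 * account for it.
 */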

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a VLAN that are destined to the BMC, the ASIC
	 * expects the VLAN tag to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
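
/* Example of the VLAN table construction above (illustrative): if bits 10
 * and 20 are set in adapter->vids, the for_each_set_bit() loop yields
 * num = 2 and vids[] = { cpu_to_le16(10), cpu_to_le16(20) }, which is then
 * handed to the FW via be_cmd_vlan_config().
 */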

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	if (!test_bit(vid, adapter->vids))
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1569
ba343c77
SB
1570static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1571{
1572 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1573 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1574 int status;
1575
11ac75ed 1576 if (!sriov_enabled(adapter))
ba343c77
SB
1577 return -EPERM;
1578
11ac75ed 1579 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1580 return -EINVAL;
1581
3c31aaf3
VV
1582 /* Proceed further only if user provided MAC is different
1583 * from active MAC
1584 */
1585 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1586 return 0;
1587
3175d8c2
SP
1588 if (BEx_chip(adapter)) {
1589 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1590 vf + 1);
ba343c77 1591
11ac75ed
SP
1592 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1593 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1594 } else {
1595 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1596 vf + 1);
590c391d
PR
1597 }
1598
abccf23e
KA
1599 if (status) {
1600 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1601 mac, vf, status);
1602 return be_cmd_status(status);
1603 }
64600ea5 1604
abccf23e
KA
1605 ether_addr_copy(vf_cfg->mac_addr, mac);
1606
1607 return 0;
ba343c77
SB
1608}
1609
64600ea5 1610static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1611 struct ifla_vf_info *vi)
64600ea5
AK
1612{
1613 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1614 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1615
11ac75ed 1616 if (!sriov_enabled(adapter))
64600ea5
AK
1617 return -EPERM;
1618
11ac75ed 1619 if (vf >= adapter->num_vfs)
64600ea5
AK
1620 return -EINVAL;
1621
1622 vi->vf = vf;
ed616689
SC
1623 vi->max_tx_rate = vf_cfg->tx_rate;
1624 vi->min_tx_rate = 0;
a60b3a13
AK
1625 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1626 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1627 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1628 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1629 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1630
1631 return 0;
1632}
1633
435452aa
VV
1634static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1635{
1636 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1637 u16 vids[BE_NUM_VLANS_SUPPORTED];
1638 int vf_if_id = vf_cfg->if_handle;
1639 int status;
1640
1641 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1642 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1643 if (status)
1644 return status;
1645
 1646 /* With TVT enabled, clear any pre-programmed VLAN filters on the VF */
1647 vids[0] = 0;
1648 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1649 if (!status)
1650 dev_info(&adapter->pdev->dev,
1651 "Cleared guest VLANs on VF%d", vf);
1652
1653 /* After TVT is enabled, disallow VFs to program VLAN filters */
1654 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1655 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1656 ~BE_PRIV_FILTMGMT, vf + 1);
1657 if (!status)
1658 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1659 }
1660 return 0;
1661}
1662
1663static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1664{
1665 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1666 struct device *dev = &adapter->pdev->dev;
1667 int status;
1668
1669 /* Reset Transparent VLAN Tagging. */
1670 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1671 vf_cfg->if_handle, 0, 0);
435452aa
VV
1672 if (status)
1673 return status;
1674
1675 /* Allow VFs to program VLAN filtering */
1676 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1677 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1678 BE_PRIV_FILTMGMT, vf + 1);
1679 if (!status) {
1680 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1681 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1682 }
1683 }
1684
1685 dev_info(dev,
1686 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1687 return 0;
1688}
1689
748b539a 1690static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1691{
1692 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1693 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1694 int status;
1da87b7f 1695
11ac75ed 1696 if (!sriov_enabled(adapter))
1da87b7f
AK
1697 return -EPERM;
1698
b9fc0e53 1699 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1700 return -EINVAL;
1701
b9fc0e53
AK
1702 if (vlan || qos) {
1703 vlan |= qos << VLAN_PRIO_SHIFT;
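 /* e.g. (sketch): with VLAN_PRIO_SHIFT == 13, a request of vlan=100
 * (0x064) and qos=5 packs to (5 << 13) | 100 = 0xA064 -- the
 * standard PCP + VID layout of an 802.1Q TCI.
 */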
435452aa 1704 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1705 } else {
435452aa 1706 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1707 }
1708
abccf23e
KA
1709 if (status) {
1710 dev_err(&adapter->pdev->dev,
435452aa
VV
1711 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1712 status);
abccf23e
KA
1713 return be_cmd_status(status);
1714 }
1715
1716 vf_cfg->vlan_tag = vlan;
abccf23e 1717 return 0;
1da87b7f
AK
1718}
1719
ed616689
SC
1720static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1721 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1722{
1723 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1724 struct device *dev = &adapter->pdev->dev;
1725 int percent_rate, status = 0;
1726 u16 link_speed = 0;
1727 u8 link_status;
e1d18735 1728
11ac75ed 1729 if (!sriov_enabled(adapter))
e1d18735
AK
1730 return -EPERM;
1731
94f434c2 1732 if (vf >= adapter->num_vfs)
e1d18735
AK
1733 return -EINVAL;
1734
ed616689
SC
1735 if (min_tx_rate)
1736 return -EINVAL;
1737
0f77ba73
RN
1738 if (!max_tx_rate)
1739 goto config_qos;
1740
1741 status = be_cmd_link_status_query(adapter, &link_speed,
1742 &link_status, 0);
1743 if (status)
1744 goto err;
1745
1746 if (!link_status) {
1747 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1748 status = -ENETDOWN;
0f77ba73
RN
1749 goto err;
1750 }
1751
1752 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1753 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1754 link_speed);
1755 status = -EINVAL;
1756 goto err;
1757 }
1758
1759 /* On Skyhawk the QOS setting must be done only as a % value */
1760 percent_rate = link_speed / 100;
1761 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1762 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1763 percent_rate);
1764 status = -EINVAL;
1765 goto err;
94f434c2 1766 }
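 /* e.g. (sketch): on a 10Gbps link, link_speed is 10000 so
 * percent_rate = 100; max_tx_rate = 2500 passes the check above
 * (2500 % 100 == 0) while 2550 would be rejected on Skyhawk.
 */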
e1d18735 1767
0f77ba73
RN
1768config_qos:
1769 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1770 if (status)
0f77ba73
RN
1771 goto err;
1772
1773 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1774 return 0;
1775
1776err:
1777 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1778 max_tx_rate, vf);
abccf23e 1779 return be_cmd_status(status);
e1d18735 1780}
e2fb1afa 1781
bdce2ad7
SR
1782static int be_set_vf_link_state(struct net_device *netdev, int vf,
1783 int link_state)
1784{
1785 struct be_adapter *adapter = netdev_priv(netdev);
1786 int status;
1787
1788 if (!sriov_enabled(adapter))
1789 return -EPERM;
1790
1791 if (vf >= adapter->num_vfs)
1792 return -EINVAL;
1793
1794 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1795 if (status) {
1796 dev_err(&adapter->pdev->dev,
1797 "Link state change on VF %d failed: %#x\n", vf, status);
1798 return be_cmd_status(status);
1799 }
bdce2ad7 1800
abccf23e
KA
1801 adapter->vf_cfg[vf].plink_tracking = link_state;
1802
1803 return 0;
bdce2ad7 1804}
e1d18735 1805
e7bcbd7b
KA
1806static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1807{
1808 struct be_adapter *adapter = netdev_priv(netdev);
1809 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1810 u8 spoofchk;
1811 int status;
1812
1813 if (!sriov_enabled(adapter))
1814 return -EPERM;
1815
1816 if (vf >= adapter->num_vfs)
1817 return -EINVAL;
1818
1819 if (BEx_chip(adapter))
1820 return -EOPNOTSUPP;
1821
1822 if (enable == vf_cfg->spoofchk)
1823 return 0;
1824
1825 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1826
1827 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1828 0, spoofchk);
1829 if (status) {
1830 dev_err(&adapter->pdev->dev,
1831 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1832 return be_cmd_status(status);
1833 }
1834
1835 vf_cfg->spoofchk = enable;
1836 return 0;
1837}
1838
2632bafd
SP
1839static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1840 ulong now)
6b7c5b94 1841{
2632bafd
SP
1842 aic->rx_pkts_prev = rx_pkts;
1843 aic->tx_reqs_prev = tx_pkts;
1844 aic->jiffies = now;
1845}
ac124ff9 1846
20947770 1847static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1848{
20947770
PR
1849 struct be_adapter *adapter = eqo->adapter;
1850 int eqd, start;
2632bafd 1851 struct be_aic_obj *aic;
2632bafd
SP
1852 struct be_rx_obj *rxo;
1853 struct be_tx_obj *txo;
20947770 1854 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
1855 ulong now;
1856 u32 pps, delta;
20947770 1857 int i;
10ef9ab4 1858
20947770
PR
1859 aic = &adapter->aic_obj[eqo->idx];
1860 if (!aic->enable) {
1861 if (aic->jiffies)
1862 aic->jiffies = 0;
1863 eqd = aic->et_eqd;
1864 return eqd;
1865 }
6b7c5b94 1866
20947770 1867 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1868 do {
57a7744e 1869 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1870 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1871 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1872 }
10ef9ab4 1873
20947770 1874 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1875 do {
57a7744e 1876 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1877 tx_pkts += txo->stats.tx_reqs;
57a7744e 1878 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1879 }
6b7c5b94 1880
20947770
PR
 1881 /* Skip if the counters wrapped around or this is the first calculation */
1882 now = jiffies;
1883 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1884 rx_pkts < aic->rx_pkts_prev ||
1885 tx_pkts < aic->tx_reqs_prev) {
1886 be_aic_update(aic, rx_pkts, tx_pkts, now);
1887 return aic->prev_eqd;
1888 }
2632bafd 1889
20947770
PR
1890 delta = jiffies_to_msecs(now - aic->jiffies);
1891 if (delta == 0)
1892 return aic->prev_eqd;
10ef9ab4 1893
20947770
PR
1894 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1895 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1896 eqd = (pps / 15000) << 2;
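 /* Rough sketch of the heuristic: a combined rate of 300000 pkts/s
 * gives eqd = (300000 / 15000) << 2 = 80; rates under ~30000 pkts/s
 * yield eqd < 8 and are zeroed below to favour low latency, before
 * clamping to [min_eqd, max_eqd].
 */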
2632bafd 1897
20947770
PR
1898 if (eqd < 8)
1899 eqd = 0;
1900 eqd = min_t(u32, eqd, aic->max_eqd);
1901 eqd = max_t(u32, eqd, aic->min_eqd);
1902
1903 be_aic_update(aic, rx_pkts, tx_pkts, now);
1904
1905 return eqd;
1906}
1907
1908/* For Skyhawk-R only */
1909static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1910{
1911 struct be_adapter *adapter = eqo->adapter;
1912 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1913 ulong now = jiffies;
1914 int eqd;
1915 u32 mult_enc;
1916
1917 if (!aic->enable)
1918 return 0;
1919
3c0d49aa 1920 if (jiffies_to_msecs(now - aic->jiffies) < 1)
20947770
PR
1921 eqd = aic->prev_eqd;
1922 else
1923 eqd = be_get_new_eqd(eqo);
1924
1925 if (eqd > 100)
1926 mult_enc = R2I_DLY_ENC_1;
1927 else if (eqd > 60)
1928 mult_enc = R2I_DLY_ENC_2;
1929 else if (eqd > 20)
1930 mult_enc = R2I_DLY_ENC_3;
1931 else
1932 mult_enc = R2I_DLY_ENC_0;
1933
1934 aic->prev_eqd = eqd;
1935
1936 return mult_enc;
1937}
1938
1939void be_eqd_update(struct be_adapter *adapter, bool force_update)
1940{
1941 struct be_set_eqd set_eqd[MAX_EVT_QS];
1942 struct be_aic_obj *aic;
1943 struct be_eq_obj *eqo;
1944 int i, num = 0, eqd;
1945
1946 for_all_evt_queues(adapter, eqo, i) {
1947 aic = &adapter->aic_obj[eqo->idx];
1948 eqd = be_get_new_eqd(eqo);
1949 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
1950 set_eqd[num].delay_multiplier = (eqd * 65)/100;
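 /* e.g. (sketch): an eqd of 80 from be_get_new_eqd() is programmed
 * to FW as a delay multiplier of (80 * 65) / 100 = 52.
 */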
1951 set_eqd[num].eq_id = eqo->q.id;
1952 aic->prev_eqd = eqd;
1953 num++;
1954 }
ac124ff9 1955 }
2632bafd
SP
1956
1957 if (num)
1958 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1959}
1960
3abcdeda 1961static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1962 struct be_rx_compl_info *rxcp)
4097f663 1963{
ac124ff9 1964 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1965
ab1594e9 1966 u64_stats_update_begin(&stats->sync);
3abcdeda 1967 stats->rx_compl++;
2e588f84 1968 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1969 stats->rx_pkts++;
8670f2a5
SB
1970 if (rxcp->tunneled)
1971 stats->rx_vxlan_offload_pkts++;
2e588f84 1972 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1973 stats->rx_mcast_pkts++;
2e588f84 1974 if (rxcp->err)
ac124ff9 1975 stats->rx_compl_err++;
ab1594e9 1976 u64_stats_update_end(&stats->sync);
4097f663
SP
1977}
1978
2e588f84 1979static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1980{
19fad86f 1981 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1982 * Also ignore ipcksm for ipv6 pkts
1983 */
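 /* Sketch: a TCP/IPv4 completion passes only when both l4_csum and
 * ip_csum are set; for IPv6 pkts ip_csum is ignored and l4_csum
 * alone decides, per the note above.
 */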
2e588f84 1984 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1985 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1986}
1987
0b0ef1d0 1988static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1989{
10ef9ab4 1990 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1991 struct be_rx_page_info *rx_page_info;
3abcdeda 1992 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1993 u16 frag_idx = rxq->tail;
6b7c5b94 1994
3abcdeda 1995 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1996 BUG_ON(!rx_page_info->page);
1997
e50287be 1998 if (rx_page_info->last_frag) {
2b7bcebf
IV
1999 dma_unmap_page(&adapter->pdev->dev,
2000 dma_unmap_addr(rx_page_info, bus),
2001 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2002 rx_page_info->last_frag = false;
2003 } else {
2004 dma_sync_single_for_cpu(&adapter->pdev->dev,
2005 dma_unmap_addr(rx_page_info, bus),
2006 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2007 }
6b7c5b94 2008
0b0ef1d0 2009 queue_tail_inc(rxq);
6b7c5b94
SP
2010 atomic_dec(&rxq->used);
2011 return rx_page_info;
2012}
2013
 2014/* Throw away the data in the Rx completion */
10ef9ab4
SP
2015static void be_rx_compl_discard(struct be_rx_obj *rxo,
2016 struct be_rx_compl_info *rxcp)
6b7c5b94 2017{
6b7c5b94 2018 struct be_rx_page_info *page_info;
2e588f84 2019 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2020
e80d9da6 2021 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2022 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2023 put_page(page_info->page);
2024 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2025 }
2026}
2027
2028/*
2029 * skb_fill_rx_data forms a complete skb for an ether frame
2030 * indicated by rxcp.
2031 */
10ef9ab4
SP
2032static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2033 struct be_rx_compl_info *rxcp)
6b7c5b94 2034{
6b7c5b94 2035 struct be_rx_page_info *page_info;
2e588f84
SP
2036 u16 i, j;
2037 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2038 u8 *start;
6b7c5b94 2039
0b0ef1d0 2040 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2041 start = page_address(page_info->page) + page_info->page_offset;
2042 prefetch(start);
2043
2044 /* Copy data in the first descriptor of this completion */
2e588f84 2045 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2046
6b7c5b94
SP
2047 skb->len = curr_frag_len;
2048 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2049 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2050 /* Complete packet has now been moved to data */
2051 put_page(page_info->page);
2052 skb->data_len = 0;
2053 skb->tail += curr_frag_len;
2054 } else {
ac1ae5f3
ED
2055 hdr_len = ETH_HLEN;
2056 memcpy(skb->data, start, hdr_len);
6b7c5b94 2057 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2058 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2059 skb_shinfo(skb)->frags[0].page_offset =
2060 page_info->page_offset + hdr_len;
748b539a
SP
2061 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2062 curr_frag_len - hdr_len);
6b7c5b94 2063 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2064 skb->truesize += rx_frag_size;
6b7c5b94
SP
2065 skb->tail += hdr_len;
2066 }
205859a2 2067 page_info->page = NULL;
6b7c5b94 2068
2e588f84
SP
2069 if (rxcp->pkt_size <= rx_frag_size) {
2070 BUG_ON(rxcp->num_rcvd != 1);
2071 return;
6b7c5b94
SP
2072 }
2073
2074 /* More frags present for this completion */
2e588f84
SP
2075 remaining = rxcp->pkt_size - curr_frag_len;
2076 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2077 page_info = get_rx_page_info(rxo);
2e588f84 2078 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2079
bd46cb6c
AK
2080 /* Coalesce all frags from the same physical page in one slot */
2081 if (page_info->page_offset == 0) {
2082 /* Fresh page */
2083 j++;
b061b39e 2084 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2085 skb_shinfo(skb)->frags[j].page_offset =
2086 page_info->page_offset;
9e903e08 2087 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2088 skb_shinfo(skb)->nr_frags++;
2089 } else {
2090 put_page(page_info->page);
2091 }
2092
9e903e08 2093 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2094 skb->len += curr_frag_len;
2095 skb->data_len += curr_frag_len;
bdb28a97 2096 skb->truesize += rx_frag_size;
2e588f84 2097 remaining -= curr_frag_len;
205859a2 2098 page_info->page = NULL;
6b7c5b94 2099 }
bd46cb6c 2100 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2101}
2102
5be93b9a 2103/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2104static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2105 struct be_rx_compl_info *rxcp)
6b7c5b94 2106{
10ef9ab4 2107 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2108 struct net_device *netdev = adapter->netdev;
6b7c5b94 2109 struct sk_buff *skb;
89420424 2110
bb349bb4 2111 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2112 if (unlikely(!skb)) {
ac124ff9 2113 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2114 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2115 return;
2116 }
2117
10ef9ab4 2118 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2119
6332c8d3 2120 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2121 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2122 else
2123 skb_checksum_none_assert(skb);
6b7c5b94 2124
6332c8d3 2125 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2126 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2127 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2128 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2129
b6c0e89d 2130 skb->csum_level = rxcp->tunneled;
6384a4d0 2131 skb_mark_napi_id(skb, napi);
6b7c5b94 2132
343e43c0 2133 if (rxcp->vlanf)
86a9bad3 2134 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2135
2136 netif_receive_skb(skb);
6b7c5b94
SP
2137}
2138
5be93b9a 2139/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2140static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2141 struct napi_struct *napi,
2142 struct be_rx_compl_info *rxcp)
6b7c5b94 2143{
10ef9ab4 2144 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2145 struct be_rx_page_info *page_info;
5be93b9a 2146 struct sk_buff *skb = NULL;
2e588f84
SP
2147 u16 remaining, curr_frag_len;
2148 u16 i, j;
3968fa1e 2149
10ef9ab4 2150 skb = napi_get_frags(napi);
5be93b9a 2151 if (!skb) {
10ef9ab4 2152 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2153 return;
2154 }
2155
2e588f84
SP
2156 remaining = rxcp->pkt_size;
2157 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2158 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2159
2160 curr_frag_len = min(remaining, rx_frag_size);
2161
bd46cb6c
AK
2162 /* Coalesce all frags from the same physical page in one slot */
2163 if (i == 0 || page_info->page_offset == 0) {
2164 /* First frag or Fresh page */
2165 j++;
b061b39e 2166 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2167 skb_shinfo(skb)->frags[j].page_offset =
2168 page_info->page_offset;
9e903e08 2169 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2170 } else {
2171 put_page(page_info->page);
2172 }
9e903e08 2173 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2174 skb->truesize += rx_frag_size;
bd46cb6c 2175 remaining -= curr_frag_len;
6b7c5b94
SP
2176 memset(page_info, 0, sizeof(*page_info));
2177 }
bd46cb6c 2178 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2179
5be93b9a 2180 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2181 skb->len = rxcp->pkt_size;
2182 skb->data_len = rxcp->pkt_size;
5be93b9a 2183 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2184 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2185 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2186 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2187
b6c0e89d 2188 skb->csum_level = rxcp->tunneled;
5be93b9a 2189
343e43c0 2190 if (rxcp->vlanf)
86a9bad3 2191 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2192
10ef9ab4 2193 napi_gro_frags(napi);
2e588f84
SP
2194}
2195
10ef9ab4
SP
2196static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2197 struct be_rx_compl_info *rxcp)
2e588f84 2198{
c3c18bc1
SP
2199 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2200 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2201 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2202 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2203 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2204 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2205 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2206 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2207 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2208 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2209 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2210 if (rxcp->vlanf) {
c3c18bc1
SP
2211 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2212 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2213 }
c3c18bc1 2214 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2215 rxcp->tunneled =
c3c18bc1 2216 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2217}
2218
10ef9ab4
SP
2219static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2220 struct be_rx_compl_info *rxcp)
2e588f84 2221{
c3c18bc1
SP
2222 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2223 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2224 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2225 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2226 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2227 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2228 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2229 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2230 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2231 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2232 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2233 if (rxcp->vlanf) {
c3c18bc1
SP
2234 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2235 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2236 }
c3c18bc1
SP
2237 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2238 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2239}
2240
2241static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2242{
2243 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2244 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2245 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2246
2e588f84
SP
 2247 /* For checking the valid bit it is OK to use either definition, as the
 2248 * valid bit is at the same position in both v0 and v1 Rx compls */
2249 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2250 return NULL;
6b7c5b94 2251
2e588f84
SP
2252 rmb();
2253 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2254
2e588f84 2255 if (adapter->be3_native)
10ef9ab4 2256 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2257 else
10ef9ab4 2258 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2259
e38b1706
SK
2260 if (rxcp->ip_frag)
2261 rxcp->l4_csum = 0;
2262
15d72184 2263 if (rxcp->vlanf) {
f93f160b
VV
2264 /* In QNQ modes, if qnq bit is not set, then the packet was
2265 * tagged only with the transparent outer vlan-tag and must
2266 * not be treated as a vlan packet by host
2267 */
2268 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2269 rxcp->vlanf = 0;
6b7c5b94 2270
15d72184 2271 if (!lancer_chip(adapter))
3c709f8f 2272 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2273
939cf306 2274 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2275 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2276 rxcp->vlanf = 0;
2277 }
2e588f84
SP
2278
 2279 /* As the compl has been parsed, reset it; we won't touch it again */
2280 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2281
3abcdeda 2282 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2283 return rxcp;
2284}
2285
1829b086 2286static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2287{
6b7c5b94 2288 u32 order = get_order(size);
1829b086 2289
6b7c5b94 2290 if (order > 0)
1829b086
ED
2291 gfp |= __GFP_COMP;
2292 return alloc_pages(gfp, order);
6b7c5b94
SP
2293}
2294
2295/*
 2296 * Allocate a page, split it into fragments of size rx_frag_size and post as
2297 * receive buffers to BE
2298 */
c30d7266 2299static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2300{
3abcdeda 2301 struct be_adapter *adapter = rxo->adapter;
26d92f92 2302 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2303 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2304 struct page *pagep = NULL;
ba42fad0 2305 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2306 struct be_eth_rx_d *rxd;
2307 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2308 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2309
3abcdeda 2310 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2311 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2312 if (!pagep) {
1829b086 2313 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2314 if (unlikely(!pagep)) {
ac124ff9 2315 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2316 break;
2317 }
ba42fad0
IV
2318 page_dmaaddr = dma_map_page(dev, pagep, 0,
2319 adapter->big_page_size,
2b7bcebf 2320 DMA_FROM_DEVICE);
ba42fad0
IV
2321 if (dma_mapping_error(dev, page_dmaaddr)) {
2322 put_page(pagep);
2323 pagep = NULL;
d3de1540 2324 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2325 break;
2326 }
e50287be 2327 page_offset = 0;
6b7c5b94
SP
2328 } else {
2329 get_page(pagep);
e50287be 2330 page_offset += rx_frag_size;
6b7c5b94 2331 }
e50287be 2332 page_info->page_offset = page_offset;
6b7c5b94 2333 page_info->page = pagep;
6b7c5b94
SP
2334
2335 rxd = queue_head_node(rxq);
e50287be 2336 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2337 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2338 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2339
2340 /* Any space left in the current big page for another frag? */
2341 if ((page_offset + rx_frag_size + rx_frag_size) >
2342 adapter->big_page_size) {
2343 pagep = NULL;
e50287be
SP
2344 page_info->last_frag = true;
2345 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2346 } else {
2347 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2348 }
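 /* Sketch with the defaults (rx_frag_size 2048, 4K pages): frag 0
 * at offset 0 still leaves room (0 + 2048 + 2048 is not > 4096),
 * so the page is shared; frag 1 at offset 2048 fails the test and
 * becomes the page's last_frag.
 */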
26d92f92
SP
2349
2350 prev_page_info = page_info;
2351 queue_head_inc(rxq);
10ef9ab4 2352 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2353 }
e50287be
SP
2354
2355 /* Mark the last frag of a page when we break out of the above loop
2356 * with no more slots available in the RXQ
2357 */
2358 if (pagep) {
2359 prev_page_info->last_frag = true;
2360 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2361 }
6b7c5b94
SP
2362
2363 if (posted) {
6b7c5b94 2364 atomic_add(posted, &rxq->used);
6384a4d0
SP
2365 if (rxo->rx_post_starved)
2366 rxo->rx_post_starved = false;
c30d7266 2367 do {
69304cc9 2368 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2369 be_rxq_notify(adapter, rxq->id, notify);
2370 posted -= notify;
2371 } while (posted);
ea1dae11
SP
2372 } else if (atomic_read(&rxq->used) == 0) {
2373 /* Let be_worker replenish when memory is available */
3abcdeda 2374 rxo->rx_post_starved = true;
6b7c5b94 2375 }
6b7c5b94
SP
2376}
2377
152ffe5b 2378static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2379{
152ffe5b
SB
2380 struct be_queue_info *tx_cq = &txo->cq;
2381 struct be_tx_compl_info *txcp = &txo->txcp;
2382 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2383
152ffe5b 2384 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2385 return NULL;
2386
152ffe5b 2387 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2388 rmb();
152ffe5b 2389 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2390
152ffe5b
SB
2391 txcp->status = GET_TX_COMPL_BITS(status, compl);
2392 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2393
152ffe5b 2394 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2395 queue_tail_inc(tx_cq);
2396 return txcp;
2397}
2398
3c8def97 2399static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2400 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2401{
5f07b3c5 2402 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2403 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2404 u16 frag_index, num_wrbs = 0;
2405 struct sk_buff *skb = NULL;
2406 bool unmap_skb_hdr = false;
a73b796e 2407 struct be_eth_wrb *wrb;
6b7c5b94 2408
ec43b1a6 2409 do {
5f07b3c5
SP
2410 if (sent_skbs[txq->tail]) {
2411 /* Free skb from prev req */
2412 if (skb)
2413 dev_consume_skb_any(skb);
2414 skb = sent_skbs[txq->tail];
2415 sent_skbs[txq->tail] = NULL;
2416 queue_tail_inc(txq); /* skip hdr wrb */
2417 num_wrbs++;
2418 unmap_skb_hdr = true;
2419 }
a73b796e 2420 wrb = queue_tail_node(txq);
5f07b3c5 2421 frag_index = txq->tail;
2b7bcebf 2422 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2423 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2424 unmap_skb_hdr = false;
6b7c5b94 2425 queue_tail_inc(txq);
5f07b3c5
SP
2426 num_wrbs++;
2427 } while (frag_index != last_index);
2428 dev_consume_skb_any(skb);
6b7c5b94 2429
4d586b82 2430 return num_wrbs;
6b7c5b94
SP
2431}
2432
10ef9ab4
SP
2433/* Return the number of events in the event queue */
2434static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2435{
10ef9ab4
SP
2436 struct be_eq_entry *eqe;
2437 int num = 0;
859b1e4e 2438
10ef9ab4
SP
2439 do {
2440 eqe = queue_tail_node(&eqo->q);
2441 if (eqe->evt == 0)
2442 break;
859b1e4e 2443
10ef9ab4
SP
2444 rmb();
2445 eqe->evt = 0;
2446 num++;
2447 queue_tail_inc(&eqo->q);
2448 } while (true);
2449
2450 return num;
859b1e4e
SP
2451}
2452
10ef9ab4
SP
2453/* Leaves the EQ is disarmed state */
2454static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2455{
10ef9ab4 2456 int num = events_get(eqo);
859b1e4e 2457
20947770 2458 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2459}
2460
99b44304
KA
2461/* Free posted rx buffers that were not used */
2462static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2463{
3abcdeda 2464 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2465 struct be_rx_page_info *page_info;
2466
2467 while (atomic_read(&rxq->used) > 0) {
2468 page_info = get_rx_page_info(rxo);
2469 put_page(page_info->page);
2470 memset(page_info, 0, sizeof(*page_info));
2471 }
2472 BUG_ON(atomic_read(&rxq->used));
2473 rxq->tail = 0;
2474 rxq->head = 0;
2475}
2476
2477static void be_rx_cq_clean(struct be_rx_obj *rxo)
2478{
3abcdeda 2479 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2480 struct be_rx_compl_info *rxcp;
d23e946c
SP
2481 struct be_adapter *adapter = rxo->adapter;
2482 int flush_wait = 0;
6b7c5b94 2483
d23e946c
SP
2484 /* Consume pending rx completions.
2485 * Wait for the flush completion (identified by zero num_rcvd)
2486 * to arrive. Notify CQ even when there are no more CQ entries
2487 * for HW to flush partially coalesced CQ entries.
2488 * In Lancer, there is no need to wait for flush compl.
2489 */
2490 for (;;) {
2491 rxcp = be_rx_compl_get(rxo);
ddf1169f 2492 if (!rxcp) {
d23e946c
SP
2493 if (lancer_chip(adapter))
2494 break;
2495
954f6825
VD
2496 if (flush_wait++ > 50 ||
2497 be_check_error(adapter,
2498 BE_ERROR_HW)) {
d23e946c
SP
2499 dev_warn(&adapter->pdev->dev,
2500 "did not receive flush compl\n");
2501 break;
2502 }
2503 be_cq_notify(adapter, rx_cq->id, true, 0);
2504 mdelay(1);
2505 } else {
2506 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2507 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2508 if (rxcp->num_rcvd == 0)
2509 break;
2510 }
6b7c5b94
SP
2511 }
2512
d23e946c
SP
2513 /* After cleanup, leave the CQ in unarmed state */
2514 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2515}
2516
0ae57bb3 2517static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2518{
5f07b3c5
SP
2519 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2520 struct device *dev = &adapter->pdev->dev;
152ffe5b 2521 struct be_tx_compl_info *txcp;
0ae57bb3 2522 struct be_queue_info *txq;
152ffe5b 2523 struct be_tx_obj *txo;
0ae57bb3 2524 int i, pending_txqs;
a8e9179a 2525
1a3d0717 2526 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2527 do {
0ae57bb3
SP
2528 pending_txqs = adapter->num_tx_qs;
2529
2530 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2531 cmpl = 0;
2532 num_wrbs = 0;
0ae57bb3 2533 txq = &txo->q;
152ffe5b
SB
2534 while ((txcp = be_tx_compl_get(txo))) {
2535 num_wrbs +=
2536 be_tx_compl_process(adapter, txo,
2537 txcp->end_index);
0ae57bb3
SP
2538 cmpl++;
2539 }
2540 if (cmpl) {
2541 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2542 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2543 timeo = 0;
0ae57bb3 2544 }
cf5671e6 2545 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2546 pending_txqs--;
a8e9179a
SP
2547 }
2548
954f6825
VD
2549 if (pending_txqs == 0 || ++timeo > 10 ||
2550 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2551 break;
2552
2553 mdelay(1);
2554 } while (true);
2555
5f07b3c5 2556 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2557 for_all_tx_queues(adapter, txo, i) {
2558 txq = &txo->q;
0ae57bb3 2559
5f07b3c5
SP
2560 if (atomic_read(&txq->used)) {
2561 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2562 i, atomic_read(&txq->used));
2563 notified_idx = txq->tail;
0ae57bb3 2564 end_idx = txq->tail;
5f07b3c5
SP
2565 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2566 txq->len);
2567 /* Use the tx-compl process logic to handle requests
2568 * that were not sent to the HW.
2569 */
0ae57bb3
SP
2570 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2571 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2572 BUG_ON(atomic_read(&txq->used));
2573 txo->pend_wrb_cnt = 0;
2574 /* Since hw was never notified of these requests,
2575 * reset TXQ indices
2576 */
2577 txq->head = notified_idx;
2578 txq->tail = notified_idx;
0ae57bb3 2579 }
b03388d6 2580 }
6b7c5b94
SP
2581}
2582
10ef9ab4
SP
2583static void be_evt_queues_destroy(struct be_adapter *adapter)
2584{
2585 struct be_eq_obj *eqo;
2586 int i;
2587
2588 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2589 if (eqo->q.created) {
2590 be_eq_clean(eqo);
10ef9ab4 2591 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2592 napi_hash_del(&eqo->napi);
68d7bdcb 2593 netif_napi_del(&eqo->napi);
649886a3 2594 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2595 }
10ef9ab4
SP
2596 be_queue_free(adapter, &eqo->q);
2597 }
2598}
2599
2600static int be_evt_queues_create(struct be_adapter *adapter)
2601{
2602 struct be_queue_info *eq;
2603 struct be_eq_obj *eqo;
2632bafd 2604 struct be_aic_obj *aic;
10ef9ab4
SP
2605 int i, rc;
2606
92bf14ab
SP
2607 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2608 adapter->cfg_num_qs);
10ef9ab4
SP
2609
2610 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2611 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2612
2632bafd 2613 aic = &adapter->aic_obj[i];
10ef9ab4 2614 eqo->adapter = adapter;
10ef9ab4 2615 eqo->idx = i;
2632bafd
SP
2616 aic->max_eqd = BE_MAX_EQD;
2617 aic->enable = true;
10ef9ab4
SP
2618
2619 eq = &eqo->q;
2620 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2621 sizeof(struct be_eq_entry));
10ef9ab4
SP
2622 if (rc)
2623 return rc;
2624
f2f781a7 2625 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2626 if (rc)
2627 return rc;
649886a3
KA
2628
2629 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2630 return -ENOMEM;
2631 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2632 eqo->affinity_mask);
2633 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2634 BE_NAPI_WEIGHT);
10ef9ab4 2635 }
1cfafab9 2636 return 0;
10ef9ab4
SP
2637}
2638
5fb379ee
SP
2639static void be_mcc_queues_destroy(struct be_adapter *adapter)
2640{
2641 struct be_queue_info *q;
5fb379ee 2642
8788fdc2 2643 q = &adapter->mcc_obj.q;
5fb379ee 2644 if (q->created)
8788fdc2 2645 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2646 be_queue_free(adapter, q);
2647
8788fdc2 2648 q = &adapter->mcc_obj.cq;
5fb379ee 2649 if (q->created)
8788fdc2 2650 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2651 be_queue_free(adapter, q);
2652}
2653
2654/* Must be called only after TX qs are created as MCC shares TX EQ */
2655static int be_mcc_queues_create(struct be_adapter *adapter)
2656{
2657 struct be_queue_info *q, *cq;
5fb379ee 2658
8788fdc2 2659 cq = &adapter->mcc_obj.cq;
5fb379ee 2660 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2661 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2662 goto err;
2663
10ef9ab4
SP
2664 /* Use the default EQ for MCC completions */
2665 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2666 goto mcc_cq_free;
2667
8788fdc2 2668 q = &adapter->mcc_obj.q;
5fb379ee
SP
2669 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2670 goto mcc_cq_destroy;
2671
8788fdc2 2672 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2673 goto mcc_q_free;
2674
2675 return 0;
2676
2677mcc_q_free:
2678 be_queue_free(adapter, q);
2679mcc_cq_destroy:
8788fdc2 2680 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2681mcc_cq_free:
2682 be_queue_free(adapter, cq);
2683err:
2684 return -1;
2685}
2686
6b7c5b94
SP
2687static void be_tx_queues_destroy(struct be_adapter *adapter)
2688{
2689 struct be_queue_info *q;
3c8def97
SP
2690 struct be_tx_obj *txo;
2691 u8 i;
6b7c5b94 2692
3c8def97
SP
2693 for_all_tx_queues(adapter, txo, i) {
2694 q = &txo->q;
2695 if (q->created)
2696 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2697 be_queue_free(adapter, q);
6b7c5b94 2698
3c8def97
SP
2699 q = &txo->cq;
2700 if (q->created)
2701 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2702 be_queue_free(adapter, q);
2703 }
6b7c5b94
SP
2704}
2705
7707133c 2706static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2707{
73f394e6 2708 struct be_queue_info *cq;
3c8def97 2709 struct be_tx_obj *txo;
73f394e6 2710 struct be_eq_obj *eqo;
92bf14ab 2711 int status, i;
6b7c5b94 2712
92bf14ab 2713 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2714
10ef9ab4
SP
2715 for_all_tx_queues(adapter, txo, i) {
2716 cq = &txo->cq;
2717 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2718 sizeof(struct be_eth_tx_compl));
2719 if (status)
2720 return status;
3c8def97 2721
827da44c
JS
2722 u64_stats_init(&txo->stats.sync);
2723 u64_stats_init(&txo->stats.sync_compl);
2724
10ef9ab4
SP
2725 /* If num_evt_qs is less than num_tx_qs, then more than
 2726 * one txq shares an eq
2727 */
73f394e6
SP
2728 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2729 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2730 if (status)
2731 return status;
6b7c5b94 2732
10ef9ab4
SP
2733 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2734 sizeof(struct be_eth_wrb));
2735 if (status)
2736 return status;
6b7c5b94 2737
94d73aaa 2738 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2739 if (status)
2740 return status;
73f394e6
SP
2741
2742 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2743 eqo->idx);
3c8def97 2744 }
6b7c5b94 2745
d379142b
SP
2746 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2747 adapter->num_tx_qs);
10ef9ab4 2748 return 0;
6b7c5b94
SP
2749}
2750
10ef9ab4 2751static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2752{
2753 struct be_queue_info *q;
3abcdeda
SP
2754 struct be_rx_obj *rxo;
2755 int i;
2756
2757 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2758 q = &rxo->cq;
2759 if (q->created)
2760 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2761 be_queue_free(adapter, q);
ac6a0c4a
SP
2762 }
2763}
2764
10ef9ab4 2765static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2766{
10ef9ab4 2767 struct be_queue_info *eq, *cq;
3abcdeda
SP
2768 struct be_rx_obj *rxo;
2769 int rc, i;
6b7c5b94 2770
92bf14ab 2771 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2772 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2773
71bb8bd0
VV
 2774 /* We'll use RSS only if at least 2 RSS rings are supported. */
2775 if (adapter->num_rss_qs <= 1)
2776 adapter->num_rss_qs = 0;
2777
2778 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2779
2780 /* When the interface is not capable of RSS rings (and there is no
2781 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2782 */
71bb8bd0
VV
2783 if (adapter->num_rx_qs == 0)
2784 adapter->num_rx_qs = 1;
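 /* e.g. (sketch): with 4 EQs, 4 RSS rings are requested (plus a
 * default RXQ when need_def_rxq is set); with a single EQ, RSS is
 * dropped and a single non-RSS RXQ is created.
 */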
92bf14ab 2785
6b7c5b94 2786 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
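 /* With the default rx_frag_size of 2048 and 4K pages this works
 * out to (1 << get_order(2048)) * 4096 = 4096 (get_order(2048) is
 * 0), i.e. two 2048-byte frags carved from each page.
 */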
3abcdeda
SP
2787 for_all_rx_queues(adapter, rxo, i) {
2788 rxo->adapter = adapter;
3abcdeda
SP
2789 cq = &rxo->cq;
2790 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2791 sizeof(struct be_eth_rx_compl));
3abcdeda 2792 if (rc)
10ef9ab4 2793 return rc;
3abcdeda 2794
827da44c 2795 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2796 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2797 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2798 if (rc)
10ef9ab4 2799 return rc;
3abcdeda 2800 }
6b7c5b94 2801
d379142b 2802 dev_info(&adapter->pdev->dev,
71bb8bd0 2803 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2804 return 0;
b628bde2
SP
2805}
2806
6b7c5b94
SP
2807static irqreturn_t be_intx(int irq, void *dev)
2808{
e49cc34f
SP
2809 struct be_eq_obj *eqo = dev;
2810 struct be_adapter *adapter = eqo->adapter;
2811 int num_evts = 0;
6b7c5b94 2812
d0b9cec3
SP
2813 /* IRQ is not expected when NAPI is scheduled as the EQ
2814 * will not be armed.
2815 * But, this can happen on Lancer INTx where it takes
 2816 * a while to de-assert INTx or in BE2 where occasionally
2817 * an interrupt may be raised even when EQ is unarmed.
2818 * If NAPI is already scheduled, then counting & notifying
2819 * events will orphan them.
e49cc34f 2820 */
d0b9cec3 2821 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2822 num_evts = events_get(eqo);
d0b9cec3
SP
2823 __napi_schedule(&eqo->napi);
2824 if (num_evts)
2825 eqo->spurious_intr = 0;
2826 }
20947770 2827 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2828
d0b9cec3
SP
 2829 /* Return IRQ_HANDLED only for the first spurious intr
2830 * after a valid intr to stop the kernel from branding
2831 * this irq as a bad one!
e49cc34f 2832 */
d0b9cec3
SP
2833 if (num_evts || eqo->spurious_intr++ == 0)
2834 return IRQ_HANDLED;
2835 else
2836 return IRQ_NONE;
6b7c5b94
SP
2837}
2838
10ef9ab4 2839static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2840{
10ef9ab4 2841 struct be_eq_obj *eqo = dev;
6b7c5b94 2842
20947770 2843 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2844 napi_schedule(&eqo->napi);
6b7c5b94
SP
2845 return IRQ_HANDLED;
2846}
2847
2e588f84 2848static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2849{
 e38b1706 2850 return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
6b7c5b94
SP
2851}
2852
10ef9ab4 2853static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2854 int budget, int polling)
6b7c5b94 2855{
3abcdeda
SP
2856 struct be_adapter *adapter = rxo->adapter;
2857 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2858 struct be_rx_compl_info *rxcp;
6b7c5b94 2859 u32 work_done;
c30d7266 2860 u32 frags_consumed = 0;
6b7c5b94
SP
2861
2862 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2863 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2864 if (!rxcp)
2865 break;
2866
12004ae9
SP
 2867 /* Is it a flush compl that has no data? */
2868 if (unlikely(rxcp->num_rcvd == 0))
2869 goto loop_continue;
2870
2871 /* Discard compl with partial DMA Lancer B0 */
2872 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2873 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2874 goto loop_continue;
2875 }
2876
2877 /* On BE drop pkts that arrive due to imperfect filtering in
 2878 * promiscuous mode on some SKUs
2879 */
2880 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2881 !lancer_chip(adapter))) {
10ef9ab4 2882 be_rx_compl_discard(rxo, rxcp);
12004ae9 2883 goto loop_continue;
64642811 2884 }
009dd872 2885
6384a4d0
SP
 2886 /* Don't do GRO when we're busy_polling */
2887 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2888 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2889 else
6384a4d0
SP
2890 be_rx_compl_process(rxo, napi, rxcp);
2891
12004ae9 2892loop_continue:
c30d7266 2893 frags_consumed += rxcp->num_rcvd;
2e588f84 2894 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2895 }
2896
10ef9ab4
SP
2897 if (work_done) {
2898 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2899
6384a4d0
SP
2900 /* When an rx-obj gets into post_starved state, just
2901 * let be_worker do the posting.
2902 */
2903 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2904 !rxo->rx_post_starved)
c30d7266
AK
2905 be_post_rx_frags(rxo, GFP_ATOMIC,
2906 max_t(u32, MAX_RX_POST,
2907 frags_consumed));
6b7c5b94 2908 }
10ef9ab4 2909
6b7c5b94
SP
2910 return work_done;
2911}
2912
152ffe5b 2913static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2914{
2915 switch (status) {
2916 case BE_TX_COMP_HDR_PARSE_ERR:
2917 tx_stats(txo)->tx_hdr_parse_err++;
2918 break;
2919 case BE_TX_COMP_NDMA_ERR:
2920 tx_stats(txo)->tx_dma_err++;
2921 break;
2922 case BE_TX_COMP_ACL_ERR:
2923 tx_stats(txo)->tx_spoof_check_err++;
2924 break;
2925 }
2926}
2927
152ffe5b 2928static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2929{
2930 switch (status) {
2931 case LANCER_TX_COMP_LSO_ERR:
2932 tx_stats(txo)->tx_tso_err++;
2933 break;
2934 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2935 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2936 tx_stats(txo)->tx_spoof_check_err++;
2937 break;
2938 case LANCER_TX_COMP_QINQ_ERR:
2939 tx_stats(txo)->tx_qinq_err++;
2940 break;
2941 case LANCER_TX_COMP_PARITY_ERR:
2942 tx_stats(txo)->tx_internal_parity_err++;
2943 break;
2944 case LANCER_TX_COMP_DMA_ERR:
2945 tx_stats(txo)->tx_dma_err++;
2946 break;
2947 }
2948}
2949
c8f64615
SP
2950static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2951 int idx)
6b7c5b94 2952{
c8f64615 2953 int num_wrbs = 0, work_done = 0;
152ffe5b 2954 struct be_tx_compl_info *txcp;
c8f64615 2955
152ffe5b
SB
2956 while ((txcp = be_tx_compl_get(txo))) {
2957 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2958 work_done++;
3c8def97 2959
152ffe5b 2960 if (txcp->status) {
512bb8a2 2961 if (lancer_chip(adapter))
152ffe5b 2962 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2963 else
152ffe5b 2964 be_update_tx_err(txo, txcp->status);
512bb8a2 2965 }
10ef9ab4 2966 }
6b7c5b94 2967
10ef9ab4
SP
2968 if (work_done) {
2969 be_cq_notify(adapter, txo->cq.id, true, work_done);
2970 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2971
10ef9ab4
SP
2972 /* As Tx wrbs have been freed up, wake up netdev queue
2973 * if it was stopped due to lack of tx wrbs. */
2974 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2975 be_can_txq_wake(txo)) {
10ef9ab4 2976 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2977 }
10ef9ab4
SP
2978
2979 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2980 tx_stats(txo)->tx_compl += work_done;
2981 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2982 }
10ef9ab4 2983}
6b7c5b94 2984
f7062ee5
SP
2985#ifdef CONFIG_NET_RX_BUSY_POLL
2986static inline bool be_lock_napi(struct be_eq_obj *eqo)
2987{
2988 bool status = true;
2989
2990 spin_lock(&eqo->lock); /* BH is already disabled */
2991 if (eqo->state & BE_EQ_LOCKED) {
2992 WARN_ON(eqo->state & BE_EQ_NAPI);
2993 eqo->state |= BE_EQ_NAPI_YIELD;
2994 status = false;
2995 } else {
2996 eqo->state = BE_EQ_NAPI;
2997 }
2998 spin_unlock(&eqo->lock);
2999 return status;
3000}
3001
3002static inline void be_unlock_napi(struct be_eq_obj *eqo)
3003{
3004 spin_lock(&eqo->lock); /* BH is already disabled */
3005
3006 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3007 eqo->state = BE_EQ_IDLE;
3008
3009 spin_unlock(&eqo->lock);
3010}
3011
3012static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3013{
3014 bool status = true;
3015
3016 spin_lock_bh(&eqo->lock);
3017 if (eqo->state & BE_EQ_LOCKED) {
3018 eqo->state |= BE_EQ_POLL_YIELD;
3019 status = false;
3020 } else {
3021 eqo->state |= BE_EQ_POLL;
3022 }
3023 spin_unlock_bh(&eqo->lock);
3024 return status;
3025}
3026
3027static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3028{
3029 spin_lock_bh(&eqo->lock);
3030
3031 WARN_ON(eqo->state & (BE_EQ_NAPI));
3032 eqo->state = BE_EQ_IDLE;
3033
3034 spin_unlock_bh(&eqo->lock);
3035}
3036
3037static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3038{
3039 spin_lock_init(&eqo->lock);
3040 eqo->state = BE_EQ_IDLE;
3041}
3042
3043static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3044{
3045 local_bh_disable();
3046
3047 /* It's enough to just acquire napi lock on the eqo to stop
 3048 * be_busy_poll() from processing any queues.
3049 */
3050 while (!be_lock_napi(eqo))
3051 mdelay(1);
3052
3053 local_bh_enable();
3054}
3055
3056#else /* CONFIG_NET_RX_BUSY_POLL */
3057
3058static inline bool be_lock_napi(struct be_eq_obj *eqo)
3059{
3060 return true;
3061}
3062
3063static inline void be_unlock_napi(struct be_eq_obj *eqo)
3064{
3065}
3066
3067static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3068{
3069 return false;
3070}
3071
3072static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3073{
3074}
3075
3076static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3077{
3078}
3079
3080static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3081{
3082}
3083#endif /* CONFIG_NET_RX_BUSY_POLL */
3084
68d7bdcb 3085int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3086{
3087 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3088 struct be_adapter *adapter = eqo->adapter;
0b545a62 3089 int max_work = 0, work, i, num_evts;
6384a4d0 3090 struct be_rx_obj *rxo;
a4906ea0 3091 struct be_tx_obj *txo;
20947770 3092 u32 mult_enc = 0;
f31e50a8 3093
0b545a62
SP
3094 num_evts = events_get(eqo);
3095
a4906ea0
SP
3096 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3097 be_process_tx(adapter, txo, i);
f31e50a8 3098
6384a4d0
SP
3099 if (be_lock_napi(eqo)) {
3100 /* This loop will iterate twice for EQ0 in which
 3101 * completions of the last RXQ (default one) are also processed.
 3102 * For other EQs the loop iterates only once.
3103 */
3104 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3105 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3106 max_work = max(work, max_work);
3107 }
3108 be_unlock_napi(eqo);
3109 } else {
3110 max_work = budget;
10ef9ab4 3111 }
6b7c5b94 3112
10ef9ab4
SP
3113 if (is_mcc_eqo(eqo))
3114 be_process_mcc(adapter);
93c86700 3115
10ef9ab4
SP
3116 if (max_work < budget) {
3117 napi_complete(napi);
20947770
PR
3118
3119 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3120 * delay via a delay multiplier encoding value
3121 */
3122 if (skyhawk_chip(adapter))
3123 mult_enc = be_get_eq_delay_mult_enc(eqo);
3124
3125 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3126 mult_enc);
10ef9ab4
SP
3127 } else {
3128 /* As we'll continue in polling mode, count and clear events */
20947770 3129 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3130 }
10ef9ab4 3131 return max_work;
6b7c5b94
SP
3132}
3133
6384a4d0
SP
3134#ifdef CONFIG_NET_RX_BUSY_POLL
3135static int be_busy_poll(struct napi_struct *napi)
3136{
3137 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3138 struct be_adapter *adapter = eqo->adapter;
3139 struct be_rx_obj *rxo;
3140 int i, work = 0;
3141
3142 if (!be_lock_busy_poll(eqo))
3143 return LL_FLUSH_BUSY;
3144
3145 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3146 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3147 if (work)
3148 break;
3149 }
3150
3151 be_unlock_busy_poll(eqo);
3152 return work;
3153}
3154#endif
3155
f67ef7ba 3156void be_detect_error(struct be_adapter *adapter)
7c185276 3157{
e1cfb67a
PR
3158 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3159 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3160 u32 i;
eb0eecc1 3161 struct device *dev = &adapter->pdev->dev;
7c185276 3162
954f6825 3163 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3164 return;
3165
e1cfb67a
PR
3166 if (lancer_chip(adapter)) {
3167 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3168 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3169 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3170 sliport_err1 = ioread32(adapter->db +
748b539a 3171 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3172 sliport_err2 = ioread32(adapter->db +
748b539a 3173 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
3174 /* Do not log error messages if its a FW reset */
3175 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3176 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3177 dev_info(dev, "Firmware update in progress\n");
3178 } else {
eb0eecc1
SK
3179 dev_err(dev, "Error detected in the card\n");
3180 dev_err(dev, "ERR: sliport status 0x%x\n",
3181 sliport_status);
3182 dev_err(dev, "ERR: sliport error1 0x%x\n",
3183 sliport_err1);
3184 dev_err(dev, "ERR: sliport error2 0x%x\n",
3185 sliport_err2);
3186 }
e1cfb67a
PR
3187 }
3188 } else {
25848c90
SR
3189 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3190 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3191 ue_lo_mask = ioread32(adapter->pcicfg +
3192 PCICFG_UE_STATUS_LOW_MASK);
3193 ue_hi_mask = ioread32(adapter->pcicfg +
3194 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3195
f67ef7ba
PR
3196 ue_lo = (ue_lo & ~ue_lo_mask);
3197 ue_hi = (ue_hi & ~ue_hi_mask);
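 /* Illustrative decode: after masking, a set bit N in ue_lo maps to
 * ue_status_low_desc[N] (and ue_hi to ue_status_hi_desc[N]) in the
 * loops below; each surviving bit is reported once.
 */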
7c185276 3198
eb0eecc1
SK
3199 /* On certain platforms BE hardware can indicate spurious UEs.
3200 * Allow HW to stop working completely in case of a real UE.
3201 * Hence not setting the hw_error for UE detection.
3202 */
f67ef7ba 3203
eb0eecc1 3204 if (ue_lo || ue_hi) {
eb0eecc1
SK
3205 dev_err(dev,
3206 "Unrecoverable Error detected in the adapter");
3207 dev_err(dev, "Please reboot server to recover");
3208 if (skyhawk_chip(adapter))
954f6825
VD
3209 be_set_error(adapter, BE_ERROR_UE);
3210
eb0eecc1
SK
3211 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3212 if (ue_lo & 1)
3213 dev_err(dev, "UE: %s bit set\n",
3214 ue_status_low_desc[i]);
3215 }
3216 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3217 if (ue_hi & 1)
3218 dev_err(dev, "UE: %s bit set\n",
3219 ue_status_hi_desc[i]);
3220 }
7c185276
AK
3221 }
3222 }
7c185276
AK
3223}
3224
8d56ff11
SP
3225static void be_msix_disable(struct be_adapter *adapter)
3226{
ac6a0c4a 3227 if (msix_enabled(adapter)) {
8d56ff11 3228 pci_disable_msix(adapter->pdev);
ac6a0c4a 3229 adapter->num_msix_vec = 0;
68d7bdcb 3230 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3231 }
3232}
3233
c2bba3df 3234static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3235{
7dc4c064 3236 int i, num_vec;
d379142b 3237 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3238
92bf14ab
SP
3239 /* If RoCE is supported, program the max number of NIC vectors that
3240 * may be configured via set-channels, along with vectors needed for
 3241 * RoCE. Else, just program the number we'll use initially.
3242 */
3243 if (be_roce_supported(adapter))
3244 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3245 2 * num_online_cpus());
3246 else
3247 num_vec = adapter->cfg_num_qs;
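 /* Sketch: with RoCE on an 8-CPU host and be_max_eqs() == 16,
 * num_vec = min(32, 16) = 16, later split evenly between NIC and
 * RoCE vectors once pci_enable_msix_range() succeeds below.
 */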
3abcdeda 3248
ac6a0c4a 3249 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3250 adapter->msix_entries[i].entry = i;
3251
7dc4c064
AG
3252 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3253 MIN_MSIX_VECTORS, num_vec);
3254 if (num_vec < 0)
3255 goto fail;
92bf14ab 3256
92bf14ab
SP
3257 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3258 adapter->num_msix_roce_vec = num_vec / 2;
3259 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3260 adapter->num_msix_roce_vec);
3261 }
3262
3263 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3264
3265 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3266 adapter->num_msix_vec);
c2bba3df 3267 return 0;
7dc4c064
AG
3268
3269fail:
3270 dev_warn(dev, "MSIx enable failed\n");
3271
3272 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3273 if (be_virtfn(adapter))
7dc4c064
AG
3274 return num_vec;
3275 return 0;
6b7c5b94
SP
3276}
3277
fe6d2a38 3278static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3279 struct be_eq_obj *eqo)
b628bde2 3280{
f2f781a7 3281 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3282}
6b7c5b94 3283
b628bde2
SP
3284static int be_msix_register(struct be_adapter *adapter)
3285{
10ef9ab4
SP
3286 struct net_device *netdev = adapter->netdev;
3287 struct be_eq_obj *eqo;
3288 int status, i, vec;
6b7c5b94 3289
10ef9ab4
SP
3290 for_all_evt_queues(adapter, eqo, i) {
3291 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3292 vec = be_msix_vec_get(adapter, eqo);
3293 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
3294 if (status)
3295 goto err_msix;
d658d98a
PR
3296
3297 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3298 }
b628bde2 3299
6b7c5b94 3300 return 0;
3abcdeda 3301err_msix:
6e3cd5fa
VD
3302 for (i--; i >= 0; i--) {
3303 eqo = &adapter->eq_obj[i];
10ef9ab4 3304 free_irq(be_msix_vec_get(adapter, eqo), eqo);
6e3cd5fa 3305 }
10ef9ab4 3306 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3307 status);
ac6a0c4a 3308 be_msix_disable(adapter);
6b7c5b94
SP
3309 return status;
3310}
3311
3312static int be_irq_register(struct be_adapter *adapter)
3313{
3314 struct net_device *netdev = adapter->netdev;
3315 int status;
3316
ac6a0c4a 3317 if (msix_enabled(adapter)) {
6b7c5b94
SP
3318 status = be_msix_register(adapter);
3319 if (status == 0)
3320 goto done;
ba343c77 3321 /* INTx is not supported for VF */
18c57c74 3322 if (be_virtfn(adapter))
ba343c77 3323 return status;
6b7c5b94
SP
3324 }
3325
e49cc34f 3326 /* INTx: only the first EQ is used */
6b7c5b94
SP
3327 netdev->irq = adapter->pdev->irq;
3328 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3329 &adapter->eq_obj[0]);
6b7c5b94
SP
3330 if (status) {
3331 dev_err(&adapter->pdev->dev,
3332 "INTx request IRQ failed - err %d\n", status);
3333 return status;
3334 }
3335done:
3336 adapter->isr_registered = true;
3337 return 0;
3338}
3339
3340static void be_irq_unregister(struct be_adapter *adapter)
3341{
3342 struct net_device *netdev = adapter->netdev;
10ef9ab4 3343 struct be_eq_obj *eqo;
d658d98a 3344 int i, vec;
6b7c5b94
SP
3345
3346 if (!adapter->isr_registered)
3347 return;
3348
3349 /* INTx */
ac6a0c4a 3350 if (!msix_enabled(adapter)) {
e49cc34f 3351 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3352 goto done;
3353 }
3354
3355 /* MSIx */
d658d98a
PR
3356 for_all_evt_queues(adapter, eqo, i) {
3357 vec = be_msix_vec_get(adapter, eqo);
3358 irq_set_affinity_hint(vec, NULL);
3359 free_irq(vec, eqo);
3360 }
3abcdeda 3361
6b7c5b94
SP
3362done:
3363 adapter->isr_registered = false;
6b7c5b94
SP
3364}
3365
10ef9ab4 3366static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79 3367{
62219066 3368 struct rss_info *rss = &adapter->rss_info;
482c9e79
SP
3369 struct be_queue_info *q;
3370 struct be_rx_obj *rxo;
3371 int i;
3372
3373 for_all_rx_queues(adapter, rxo, i) {
3374 q = &rxo->q;
3375 if (q->created) {
99b44304
KA
3376 /* If RXQs are destroyed while in an "out of buffer"
3377 * state, there is a possibility of an HW stall on
3378 * Lancer. So, post 64 buffers to each queue to relieve
3379 * the "out of buffer" condition.
3380 * Make sure there's space in the RXQ before posting.
3381 */
3382 if (lancer_chip(adapter)) {
3383 be_rx_cq_clean(rxo);
3384 if (atomic_read(&q->used) == 0)
3385 be_post_rx_frags(rxo, GFP_KERNEL,
3386 MAX_RX_POST);
3387 }
3388
482c9e79 3389 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3390 be_rx_cq_clean(rxo);
99b44304 3391 be_rxq_clean(rxo);
482c9e79 3392 }
10ef9ab4 3393 be_queue_free(adapter, q);
482c9e79 3394 }
62219066
AK
3395
3396 if (rss->rss_flags) {
3397 rss->rss_flags = RSS_ENABLE_NONE;
3398 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3399 128, rss->rss_hkey);
3400 }
482c9e79
SP
3401}
3402
bcc84140
KA
3403static void be_disable_if_filters(struct be_adapter *adapter)
3404{
3405 be_cmd_pmac_del(adapter, adapter->if_handle,
3406 adapter->pmac_id[0], 0);
3407
3408 be_clear_uc_list(adapter);
3409
3410 /* The IFACE flags are enabled in the open path and cleared
3411 * in the close path. When a VF gets detached from the host and
3412 * assigned to a VM the following happens:
3413 * - VF's IFACE flags get cleared in the detach path
3414 * - IFACE create is issued by the VF in the attach path
3415 * Due to a bug in the BE3/Skyhawk-R FW
3416 * (Lancer FW doesn't have the bug), the IFACE capability flags
3417 * specified along with the IFACE create cmd issued by a VF are not
3418 * honoured by FW. As a consequence, if a *new* driver
3419 * (that enables/disables IFACE flags in open/close)
3420 * is loaded in the host and an *old* driver is used by a VM/VF,
3421 * the IFACE gets created *without* the needed flags.
3422 * To avoid this, disable RX-filter flags only for Lancer.
3423 */
3424 if (lancer_chip(adapter)) {
3425 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3426 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3427 }
3428}
3429
889cd4b2
SP
3430static int be_close(struct net_device *netdev)
3431{
3432 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3433 struct be_eq_obj *eqo;
3434 int i;
889cd4b2 3435
e1ad8e33
KA
3436 /* This protection is needed as be_close() may be called even when the
3437 * adapter is in cleared state (after eeh perm failure)
3438 */
3439 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3440 return 0;
3441
bcc84140
KA
3442 be_disable_if_filters(adapter);
3443
dff345c5
IV
3444 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3445 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3446 napi_disable(&eqo->napi);
6384a4d0
SP
3447 be_disable_busy_poll(eqo);
3448 }
71237b6f 3449 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3450 }
a323d9bf
SP
3451
3452 be_async_mcc_disable(adapter);
3453
3454 /* Wait for all pending tx completions to arrive so that
3455 * all tx skbs are freed.
3456 */
fba87559 3457 netif_tx_disable(netdev);
6e1f9975 3458 be_tx_compl_clean(adapter);
a323d9bf
SP
3459
3460 be_rx_qs_destroy(adapter);
d11a347d 3461
a323d9bf 3462 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3463 if (msix_enabled(adapter))
3464 synchronize_irq(be_msix_vec_get(adapter, eqo));
3465 else
3466 synchronize_irq(netdev->irq);
3467 be_eq_clean(eqo);
63fcb27f
PR
3468 }
3469
889cd4b2
SP
3470 be_irq_unregister(adapter);
3471
482c9e79
SP
3472 return 0;
3473}
3474
10ef9ab4 3475static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3476{
1dcf7b1c
ED
3477 struct rss_info *rss = &adapter->rss_info;
3478 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3479 struct be_rx_obj *rxo;
e9008ee9 3480 int rc, i, j;
482c9e79
SP
3481
3482 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3483 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3484 sizeof(struct be_eth_rx_d));
3485 if (rc)
3486 return rc;
3487 }
3488
71bb8bd0
VV
3489 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3490 rxo = default_rxo(adapter);
3491 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3492 rx_frag_size, adapter->if_handle,
3493 false, &rxo->rss_id);
3494 if (rc)
3495 return rc;
3496 }
10ef9ab4
SP
3497
3498 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3499 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3500 rx_frag_size, adapter->if_handle,
3501 true, &rxo->rss_id);
482c9e79
SP
3502 if (rc)
3503 return rc;
3504 }
3505
3506 if (be_multi_rxq(adapter)) {
71bb8bd0 3507 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3508 for_all_rss_queues(adapter, rxo, i) {
e2557877 3509 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3510 break;
e2557877
VD
3511 rss->rsstable[j + i] = rxo->rss_id;
3512 rss->rss_queue[j + i] = i;
e9008ee9
PR
3513 }
3514 }
e2557877
VD
3515 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3516 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3517
3518 if (!BEx_chip(adapter))
e2557877
VD
3519 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3520 RSS_ENABLE_UDP_IPV6;
62219066
AK
3521
3522 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3523 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3524 RSS_INDIR_TABLE_LEN, rss_key);
3525 if (rc) {
3526 rss->rss_flags = RSS_ENABLE_NONE;
3527 return rc;
3528 }
3529
3530 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
da1388d6
VV
3531 } else {
3532 /* Disable RSS, if only default RX Q is created */
e2557877 3533 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3534 }
594ad54a 3535
e2557877 3536
b02e60c8
SR
3537 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3538 * which is a queue empty condition
3539 */
10ef9ab4 3540 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3541 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3542
889cd4b2
SP
3543 return 0;
3544}
3545
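/* Illustrative sketch (not from the driver): how the nested loops above
 * fill the RSS indirection table. With 4 RSS queues and a 128-entry
 * table, the queue ids repeat round-robin: 0 1 2 3 0 1 2 3 ...
 */
#include <stdio.h>

#define DEMO_INDIR_TABLE_LEN	128	/* stands in for RSS_INDIR_TABLE_LEN */

int main(void)
{
	int rsstable[DEMO_INDIR_TABLE_LEN];
	int num_rss_qs = 4, i, j;

	for (j = 0; j < DEMO_INDIR_TABLE_LEN; j += num_rss_qs)
		for (i = 0; i < num_rss_qs && (j + i) < DEMO_INDIR_TABLE_LEN; i++)
			rsstable[j + i] = i;	/* rxo->rss_id in the driver */

	for (i = 0; i < 8; i++)			/* show the first few entries */
		printf("%d ", rsstable[i]);
	printf("\n");
	return 0;
}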
bcc84140
KA
3546static int be_enable_if_filters(struct be_adapter *adapter)
3547{
3548 int status;
3549
3550 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3551 if (status)
3552 return status;
3553
3554 /* For BE3 VFs, the PF programs the initial MAC address */
3555 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3556 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3557 adapter->if_handle,
3558 &adapter->pmac_id[0], 0);
3559 if (status)
3560 return status;
3561 }
3562
3563 if (adapter->vlans_added)
3564 be_vid_config(adapter);
3565
3566 be_set_rx_mode(adapter->netdev);
3567
3568 return 0;
3569}
3570
6b7c5b94
SP
3571static int be_open(struct net_device *netdev)
3572{
3573 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3574 struct be_eq_obj *eqo;
3abcdeda 3575 struct be_rx_obj *rxo;
10ef9ab4 3576 struct be_tx_obj *txo;
b236916a 3577 u8 link_status;
3abcdeda 3578 int status, i;
5fb379ee 3579
10ef9ab4 3580 status = be_rx_qs_create(adapter);
482c9e79
SP
3581 if (status)
3582 goto err;
3583
bcc84140
KA
3584 status = be_enable_if_filters(adapter);
3585 if (status)
3586 goto err;
3587
c2bba3df
SK
3588 status = be_irq_register(adapter);
3589 if (status)
3590 goto err;
5fb379ee 3591
10ef9ab4 3592 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3593 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3594
10ef9ab4
SP
3595 for_all_tx_queues(adapter, txo, i)
3596 be_cq_notify(adapter, txo->cq.id, true, 0);
3597
7a1e9b20
SP
3598 be_async_mcc_enable(adapter);
3599
10ef9ab4
SP
3600 for_all_evt_queues(adapter, eqo, i) {
3601 napi_enable(&eqo->napi);
6384a4d0 3602 be_enable_busy_poll(eqo);
20947770 3603 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3604 }
04d3d624 3605 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3606
323ff71e 3607 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3608 if (!status)
3609 be_link_status_update(adapter, link_status);
3610
fba87559 3611 netif_tx_start_all_queues(netdev);
c5abe7c0 3612#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3613 if (skyhawk_chip(adapter))
3614 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3615#endif
3616
889cd4b2
SP
3617 return 0;
3618err:
3619 be_close(adapter->netdev);
3620 return -EIO;
5fb379ee
SP
3621}
3622
71d8d1b5
AK
3623static int be_setup_wol(struct be_adapter *adapter, bool enable)
3624{
145155e7 3625 struct device *dev = &adapter->pdev->dev;
71d8d1b5 3626 struct be_dma_mem cmd;
71d8d1b5 3627 u8 mac[ETH_ALEN];
145155e7 3628 int status;
71d8d1b5 3629
c7bf7169 3630 eth_zero_addr(mac);
71d8d1b5
AK
3631
3632 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
145155e7 3633 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
ddf1169f 3634 if (!cmd.va)
6b568689 3635 return -ENOMEM;
71d8d1b5
AK
3636
3637 if (enable) {
3638 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3639 PCICFG_PM_CONTROL_OFFSET,
3640 PCICFG_PM_CONTROL_MASK);
71d8d1b5 3641 if (status) {
145155e7
KP
3642 dev_err(dev, "Could not enable Wake-on-lan\n");
3643 goto err;
71d8d1b5 3644 }
71d8d1b5 3645 } else {
145155e7 3646 ether_addr_copy(mac, adapter->netdev->dev_addr);
71d8d1b5
AK
3647 }
3648
145155e7
KP
3649 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3650 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3651 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3652err:
3653 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3654 return status;
3655}
3656
f7062ee5
SP
3657static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3658{
3659 u32 addr;
3660
3661 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3662
3663 mac[5] = (u8)(addr & 0xFF);
3664 mac[4] = (u8)((addr >> 8) & 0xFF);
3665 mac[3] = (u8)((addr >> 16) & 0xFF);
3666 /* Use the OUI from the current MAC address */
3667 memcpy(mac, adapter->netdev->dev_addr, 3);
3668}
3669
6d87f5c3
AK
3670/*
3671 * Generate a seed MAC address from the PF MAC Address using jhash.
3672 * MAC addresses for VFs are assigned incrementally starting from the seed.
3673 * These addresses are programmed in the ASIC by the PF and the VF driver
3674 * queries for the MAC address during its probe.
3675 */
4c876616 3676static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3677{
f9449ab7 3678 u32 vf;
3abcdeda 3679 int status = 0;
6d87f5c3 3680 u8 mac[ETH_ALEN];
11ac75ed 3681 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3682
3683 be_vf_eth_addr_generate(adapter, mac);
3684
11ac75ed 3685 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3686 if (BEx_chip(adapter))
590c391d 3687 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3688 vf_cfg->if_handle,
3689 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3690 else
3691 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3692 vf + 1);
590c391d 3693
6d87f5c3
AK
3694 if (status)
3695 dev_err(&adapter->pdev->dev,
748b539a
SP
3696 "Mac address assignment failed for VF %d\n",
3697 vf);
6d87f5c3 3698 else
11ac75ed 3699 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3700
3701 mac[5] += 1;
3702 }
3703 return status;
3704}
3705
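/* Illustrative sketch (not from the driver): the seed-MAC scheme above.
 * The low 3 bytes come from a hash of the PF MAC (jhash in the driver;
 * a trivial stand-in hash here), the OUI is copied from the PF MAC, and
 * each successive VF gets seed + 1 in the last byte.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char pf_mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x56 };
	unsigned char mac[6];
	unsigned int addr = 0, vf, i;

	for (i = 0; i < 6; i++)		/* stand-in for jhash(pf_mac, 6, 0) */
		addr = addr * 31 + pf_mac[i];

	mac[5] = addr & 0xFF;
	mac[4] = (addr >> 8) & 0xFF;
	mac[3] = (addr >> 16) & 0xFF;
	memcpy(mac, pf_mac, 3);		/* keep the PF's OUI */

	for (vf = 0; vf < 3; vf++, mac[5]++)
		printf("VF%u MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}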
4c876616
SP
3706static int be_vfs_mac_query(struct be_adapter *adapter)
3707{
3708 int status, vf;
3709 u8 mac[ETH_ALEN];
3710 struct be_vf_cfg *vf_cfg;
4c876616
SP
3711
3712 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3713 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3714 mac, vf_cfg->if_handle,
3715 false, vf+1);
4c876616
SP
3716 if (status)
3717 return status;
3718 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3719 }
3720 return 0;
3721}
3722
f9449ab7 3723static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3724{
11ac75ed 3725 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3726 u32 vf;
3727
257a3feb 3728 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3729 dev_warn(&adapter->pdev->dev,
3730 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3731 goto done;
3732 }
3733
b4c1df93
SP
3734 pci_disable_sriov(adapter->pdev);
3735
11ac75ed 3736 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3737 if (BEx_chip(adapter))
11ac75ed
SP
3738 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3739 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3740 else
3741 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3742 vf + 1);
f9449ab7 3743
11ac75ed
SP
3744 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3745 }
39f1d94d
SP
3746done:
3747 kfree(adapter->vf_cfg);
3748 adapter->num_vfs = 0;
f174c7ec 3749 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3750}
3751
7707133c
SP
3752static void be_clear_queues(struct be_adapter *adapter)
3753{
3754 be_mcc_queues_destroy(adapter);
3755 be_rx_cqs_destroy(adapter);
3756 be_tx_queues_destroy(adapter);
3757 be_evt_queues_destroy(adapter);
3758}
3759
68d7bdcb 3760static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3761{
191eb756
SP
3762 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3763 cancel_delayed_work_sync(&adapter->work);
3764 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3765 }
68d7bdcb
SP
3766}
3767
eb7dd46c
SP
3768static void be_cancel_err_detection(struct be_adapter *adapter)
3769{
3770 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3771 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3772 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3773 }
3774}
3775
c5abe7c0 3776#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3777static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3778{
630f4b70
SB
3779 struct net_device *netdev = adapter->netdev;
3780
c9c47142
SP
3781 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3782 be_cmd_manage_iface(adapter, adapter->if_handle,
3783 OP_CONVERT_TUNNEL_TO_NORMAL);
3784
3785 if (adapter->vxlan_port)
3786 be_cmd_set_vxlan_port(adapter, 0);
3787
3788 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3789 adapter->vxlan_port = 0;
630f4b70
SB
3790
3791 netdev->hw_enc_features = 0;
3792 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3793 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3794}
c5abe7c0 3795#endif
c9c47142 3796
f2858738
VV
3797static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3798{
3799 struct be_resources res = adapter->pool_res;
3800 u16 num_vf_qs = 1;
3801
ee9ad280 3802 /* Distribute the queue resources among the PF and its VFs.
f2858738
VV
3803 * Do not distribute queue resources in multi-channel configuration.
3804 */
3805 if (num_vfs && !be_is_mc(adapter)) {
ee9ad280
SB
3806 /* Divide the qpairs evenly among the VFs and the PF, capped
3807 * at VF-EQ-count. Any remainder qpairs belong to the PF.
3808 */
3809 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3810 res.max_rss_qs / (num_vfs + 1));
f2858738
VV
3811
3812 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3813 * interfaces per port. Provide RSS on VFs, only if number
3814 * of VFs requested is less than MAX_RSS_IFACES limit.
3815 */
3816 if (num_vfs >= MAX_RSS_IFACES)
3817 num_vf_qs = 1;
3818 }
3819 return num_vf_qs;
3820}
3821
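/* Illustrative sketch (not from the driver): the queue-distribution math
 * above. With 32 RSS-capable queues in the PF pool and 7 VFs requested,
 * each of the 8 functions (PF + 7 VFs) gets 32 / 8 = 4 qpairs, subject
 * to the VF EQ cap (SH_VF_MAX_NIC_EQS in the driver; 16 assumed here).
 */
#include <stdio.h>

#define DEMO_VF_MAX_NIC_EQS	16	/* assumed value, for illustration */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int max_rss_qs = 32, num_vfs = 7;
	int num_vf_qs = min_int(DEMO_VF_MAX_NIC_EQS,
				max_rss_qs / (num_vfs + 1));

	printf("qpairs per VF: %d\n", num_vf_qs);	/* -> 4 */
	return 0;
}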
b05004ad
SK
3822static int be_clear(struct be_adapter *adapter)
3823{
f2858738
VV
3824 struct pci_dev *pdev = adapter->pdev;
3825 u16 num_vf_qs;
3826
68d7bdcb 3827 be_cancel_worker(adapter);
191eb756 3828
11ac75ed 3829 if (sriov_enabled(adapter))
f9449ab7
SP
3830 be_vf_clear(adapter);
3831
bec84e6b
VV
3832 /* Re-configure FW to distribute resources evenly across max-supported
3833 * number of VFs, only when VFs are not already enabled.
3834 */
ace40aff
VV
3835 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3836 !pci_vfs_assigned(pdev)) {
f2858738
VV
3837 num_vf_qs = be_calculate_vf_qs(adapter,
3838 pci_sriov_get_totalvfs(pdev));
bec84e6b 3839 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3840 pci_sriov_get_totalvfs(pdev),
3841 num_vf_qs);
3842 }
bec84e6b 3843
c5abe7c0 3844#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3845 be_disable_vxlan_offloads(adapter);
c5abe7c0 3846#endif
bcc84140
KA
3847 kfree(adapter->pmac_id);
3848 adapter->pmac_id = NULL;
fbc13f01 3849
f9449ab7 3850 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3851
7707133c 3852 be_clear_queues(adapter);
a54769f5 3853
10ef9ab4 3854 be_msix_disable(adapter);
e1ad8e33 3855 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3856 return 0;
3857}
3858
4c876616 3859static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3860{
92bf14ab 3861 struct be_resources res = {0};
bcc84140 3862 u32 cap_flags, en_flags, vf;
4c876616 3863 struct be_vf_cfg *vf_cfg;
0700d816 3864 int status;
abb93951 3865
0700d816 3866 /* If a FW profile exists, then cap_flags are updated */
4c876616 3867 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3868 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3869
4c876616 3870 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3871 if (!BE3_chip(adapter)) {
3872 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3873 RESOURCE_LIMITS,
92bf14ab 3874 vf + 1);
435452aa 3875 if (!status) {
92bf14ab 3876 cap_flags = res.if_cap_flags;
435452aa
VV
3877 /* Prevent VFs from enabling VLAN promiscuous
3878 * mode
3879 */
3880 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3881 }
92bf14ab 3882 }
4c876616 3883
bcc84140
KA
3884 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3885 BE_IF_FLAGS_BROADCAST |
3886 BE_IF_FLAGS_MULTICAST |
3887 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3888 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3889 &vf_cfg->if_handle, vf + 1);
4c876616 3890 if (status)
0700d816 3891 return status;
4c876616 3892 }
0700d816
KA
3893
3894 return 0;
abb93951
PR
3895}
3896
39f1d94d 3897static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3898{
11ac75ed 3899 struct be_vf_cfg *vf_cfg;
30128031
SP
3900 int vf;
3901
39f1d94d
SP
3902 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3903 GFP_KERNEL);
3904 if (!adapter->vf_cfg)
3905 return -ENOMEM;
3906
11ac75ed
SP
3907 for_all_vfs(adapter, vf_cfg, vf) {
3908 vf_cfg->if_handle = -1;
3909 vf_cfg->pmac_id = -1;
30128031 3910 }
39f1d94d 3911 return 0;
30128031
SP
3912}
3913
f9449ab7
SP
3914static int be_vf_setup(struct be_adapter *adapter)
3915{
c502224e 3916 struct device *dev = &adapter->pdev->dev;
11ac75ed 3917 struct be_vf_cfg *vf_cfg;
4c876616 3918 int status, old_vfs, vf;
e7bcbd7b 3919 bool spoofchk;
39f1d94d 3920
257a3feb 3921 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3922
3923 status = be_vf_setup_init(adapter);
3924 if (status)
3925 goto err;
30128031 3926
4c876616
SP
3927 if (old_vfs) {
3928 for_all_vfs(adapter, vf_cfg, vf) {
3929 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3930 if (status)
3931 goto err;
3932 }
f9449ab7 3933
4c876616
SP
3934 status = be_vfs_mac_query(adapter);
3935 if (status)
3936 goto err;
3937 } else {
bec84e6b
VV
3938 status = be_vfs_if_create(adapter);
3939 if (status)
3940 goto err;
3941
39f1d94d
SP
3942 status = be_vf_eth_addr_config(adapter);
3943 if (status)
3944 goto err;
3945 }
f9449ab7 3946
11ac75ed 3947 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3948 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3949 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3950 vf + 1);
3951 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3952 status = be_cmd_set_fn_privileges(adapter,
435452aa 3953 vf_cfg->privileges |
04a06028
SP
3954 BE_PRIV_FILTMGMT,
3955 vf + 1);
435452aa
VV
3956 if (!status) {
3957 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3958 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3959 vf);
435452aa 3960 }
04a06028
SP
3961 }
3962
0f77ba73
RN
3963 /* Allow full available bandwidth */
3964 if (!old_vfs)
3965 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3966
e7bcbd7b
KA
3967 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3968 vf_cfg->if_handle, NULL,
3969 &spoofchk);
3970 if (!status)
3971 vf_cfg->spoofchk = spoofchk;
3972
bdce2ad7 3973 if (!old_vfs) {
0599863d 3974 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3975 be_cmd_set_logical_link_config(adapter,
3976 IFLA_VF_LINK_STATE_AUTO,
3977 vf+1);
3978 }
f9449ab7 3979 }
b4c1df93
SP
3980
3981 if (!old_vfs) {
3982 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3983 if (status) {
3984 dev_err(dev, "SRIOV enable failed\n");
3985 adapter->num_vfs = 0;
3986 goto err;
3987 }
3988 }
f174c7ec
VV
3989
3990 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3991 return 0;
3992err:
4c876616
SP
3993 dev_err(dev, "VF setup failed\n");
3994 be_vf_clear(adapter);
f9449ab7
SP
3995 return status;
3996}
3997
f93f160b
VV
3998/* Converting function_mode bits on BE3 to SH mc_type enums */
3999
4000static u8 be_convert_mc_type(u32 function_mode)
4001{
66064dbc 4002 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4003 return vNIC1;
66064dbc 4004 else if (function_mode & QNQ_MODE)
f93f160b
VV
4005 return FLEX10;
4006 else if (function_mode & VNIC_MODE)
4007 return vNIC2;
4008 else if (function_mode & UMC_ENABLED)
4009 return UMC;
4010 else
4011 return MC_NONE;
4012}
4013
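/* Illustrative sketch (not from the driver): the precedence encoded in
 * be_convert_mc_type() above. QNQ+VNIC maps to vNIC1, QNQ alone to
 * FLEX10, VNIC alone to vNIC2, UMC last. The demo bit values are
 * assumptions, not the driver's VNIC_MODE/QNQ_MODE/UMC_ENABLED.
 */
#include <stdio.h>

enum { DEMO_VNIC = 1 << 0, DEMO_QNQ = 1 << 1, DEMO_UMC = 1 << 2 };

static const char *demo_mc_type(unsigned int mode)
{
	if ((mode & DEMO_VNIC) && (mode & DEMO_QNQ))
		return "vNIC1";
	else if (mode & DEMO_QNQ)
		return "FLEX10";
	else if (mode & DEMO_VNIC)
		return "vNIC2";
	else if (mode & DEMO_UMC)
		return "UMC";
	return "MC_NONE";
}

int main(void)
{
	printf("%s\n", demo_mc_type(DEMO_VNIC | DEMO_QNQ));	/* vNIC1 */
	printf("%s\n", demo_mc_type(DEMO_QNQ));			/* FLEX10 */
	return 0;
}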
92bf14ab
SP
4014/* On BE2/BE3 FW does not suggest the supported limits */
4015static void BEx_get_resources(struct be_adapter *adapter,
4016 struct be_resources *res)
4017{
bec84e6b 4018 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4019
4020 if (be_physfn(adapter))
4021 res->max_uc_mac = BE_UC_PMAC_COUNT;
4022 else
4023 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4024
f93f160b
VV
4025 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4026
4027 if (be_is_mc(adapter)) {
4028 /* Assuming that there are 4 channels per port,
4029 * when multi-channel is enabled
4030 */
4031 if (be_is_qnq_mode(adapter))
4032 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4033 else
4034 /* In a non-qnq multichannel mode, the pvid
4035 * takes up one vlan entry
4036 */
4037 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4038 } else {
92bf14ab 4039 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4040 }
4041
92bf14ab
SP
4042 res->max_mcast_mac = BE_MAX_MC;
4043
a5243dab
VV
4044 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4045 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4046 * *only* if it is RSS-capable.
4047 */
4048 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4049 be_virtfn(adapter) ||
4050 (be_is_mc(adapter) &&
4051 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4052 res->max_tx_qs = 1;
a28277dc
SR
4053 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4054 struct be_resources super_nic_res = {0};
4055
4056 /* On a SuperNIC profile, the driver needs to use the
4057 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4058 */
f2858738
VV
4059 be_cmd_get_profile_config(adapter, &super_nic_res,
4060 RESOURCE_LIMITS, 0);
a28277dc
SR
4061 /* Some old versions of BE3 FW don't report max_tx_qs value */
4062 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4063 } else {
92bf14ab 4064 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4065 }
92bf14ab
SP
4066
4067 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4068 !use_sriov && be_physfn(adapter))
4069 res->max_rss_qs = (adapter->be3_native) ?
4070 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4071 res->max_rx_qs = res->max_rss_qs + 1;
4072
e3dc867c 4073 if (be_physfn(adapter))
d3518e21 4074 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4075 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4076 else
4077 res->max_evt_qs = 1;
92bf14ab
SP
4078
4079 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4080 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4081 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4082 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4083}
4084
30128031
SP
4085static void be_setup_init(struct be_adapter *adapter)
4086{
4087 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4088 adapter->phy.link_speed = -1;
30128031
SP
4089 adapter->if_handle = -1;
4090 adapter->be3_native = false;
f66b7cfd 4091 adapter->if_flags = 0;
f25b119c
PR
4092 if (be_physfn(adapter))
4093 adapter->cmd_privileges = MAX_PRIVILEGES;
4094 else
4095 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4096}
4097
bec84e6b
VV
4098static int be_get_sriov_config(struct be_adapter *adapter)
4099{
bec84e6b 4100 struct be_resources res = {0};
d3d18312 4101 int max_vfs, old_vfs;
bec84e6b 4102
f2858738 4103 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4104
ace40aff 4105 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4106 if (BE3_chip(adapter) && !res.max_vfs) {
4107 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4108 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4109 }
4110
d3d18312 4111 adapter->pool_res = res;
bec84e6b 4112
ace40aff
VV
4113 /* If during previous unload of the driver, the VFs were not disabled,
4114 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4115 * Instead use the TotalVFs value stored in the pci-dev struct.
4116 */
bec84e6b
VV
4117 old_vfs = pci_num_vf(adapter->pdev);
4118 if (old_vfs) {
ace40aff
VV
4119 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4120 old_vfs);
4121
4122 adapter->pool_res.max_vfs =
4123 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4124 adapter->num_vfs = old_vfs;
bec84e6b
VV
4125 }
4126
4127 return 0;
4128}
4129
ace40aff
VV
4130static void be_alloc_sriov_res(struct be_adapter *adapter)
4131{
4132 int old_vfs = pci_num_vf(adapter->pdev);
4133 u16 num_vf_qs;
4134 int status;
4135
4136 be_get_sriov_config(adapter);
4137
4138 if (!old_vfs)
4139 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4140
4141 /* When the HW is in SRIOV capable configuration, the PF-pool
4142 * resources are given to PF during driver load, if there are no
4143 * old VFs. This facility is not available in BE3 FW.
4144 * Also, this is done by FW in Lancer chip.
4145 */
4146 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4147 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4148 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4149 num_vf_qs);
4150 if (status)
4151 dev_err(&adapter->pdev->dev,
4152 "Failed to optimize SRIOV resources\n");
4153 }
4154}
4155
92bf14ab 4156static int be_get_resources(struct be_adapter *adapter)
abb93951 4157{
92bf14ab
SP
4158 struct device *dev = &adapter->pdev->dev;
4159 struct be_resources res = {0};
4160 int status;
abb93951 4161
92bf14ab
SP
4162 if (BEx_chip(adapter)) {
4163 BEx_get_resources(adapter, &res);
4164 adapter->res = res;
abb93951
PR
4165 }
4166
92bf14ab
SP
4167 /* For Lancer, SH etc read per-function resource limits from FW.
4168 * GET_FUNC_CONFIG returns per function guaranteed limits.
4169 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4170 */
4171 if (!BEx_chip(adapter)) {
4172 status = be_cmd_get_func_config(adapter, &res);
4173 if (status)
4174 return status;
abb93951 4175
71bb8bd0
VV
4176 /* If a default RXQ must be created, we'll use up one RSSQ */
4177 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4178 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4179 res.max_rss_qs -= 1;
4180
92bf14ab
SP
4181 /* If RoCE may be enabled, stash away half the EQs for RoCE */
4182 if (be_roce_supported(adapter))
4183 res.max_evt_qs /= 2;
4184 adapter->res = res;
abb93951 4185 }
4c876616 4186
71bb8bd0
VV
4187 /* If FW supports RSS default queue, then skip creating non-RSS
4188 * queue for non-IP traffic.
4189 */
4190 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4191 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4192
acbafeb1
SP
4193 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4194 be_max_txqs(adapter), be_max_rxqs(adapter),
4195 be_max_rss(adapter), be_max_eqs(adapter),
4196 be_max_vfs(adapter));
4197 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4198 be_max_uc(adapter), be_max_mc(adapter),
4199 be_max_vlans(adapter));
4200
ace40aff
VV
4201 /* Sanitize cfg_num_qs based on HW and platform limits */
4202 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4203 be_max_qs(adapter));
92bf14ab 4204 return 0;
abb93951
PR
4205}
4206
39f1d94d
SP
4207static int be_get_config(struct be_adapter *adapter)
4208{
6b085ba9 4209 int status, level;
542963b7 4210 u16 profile_id;
6b085ba9 4211
980df249
SR
4212 status = be_cmd_get_cntl_attributes(adapter);
4213 if (status)
4214 return status;
4215
e97e3cda 4216 status = be_cmd_query_fw_cfg(adapter);
abb93951 4217 if (status)
92bf14ab 4218 return status;
abb93951 4219
fd7ff6f0
VD
4220 if (!lancer_chip(adapter) && be_physfn(adapter))
4221 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4222
6b085ba9
SP
4223 if (BEx_chip(adapter)) {
4224 level = be_cmd_get_fw_log_level(adapter);
4225 adapter->msg_enable =
4226 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4227 }
4228
4229 be_cmd_get_acpi_wol_cap(adapter);
4230
21252377
VV
4231 be_cmd_query_port_name(adapter);
4232
4233 if (be_physfn(adapter)) {
542963b7
VV
4234 status = be_cmd_get_active_profile(adapter, &profile_id);
4235 if (!status)
4236 dev_info(&adapter->pdev->dev,
4237 "Using profile 0x%x\n", profile_id);
962bcb75 4238 }
bec84e6b 4239
92bf14ab
SP
4240 status = be_get_resources(adapter);
4241 if (status)
4242 return status;
abb93951 4243
46ee9c14
RN
4244 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4245 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4246 if (!adapter->pmac_id)
4247 return -ENOMEM;
abb93951 4248
92bf14ab 4249 return 0;
39f1d94d
SP
4250}
4251
95046b92
SP
4252static int be_mac_setup(struct be_adapter *adapter)
4253{
4254 u8 mac[ETH_ALEN];
4255 int status;
4256
4257 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4258 status = be_cmd_get_perm_mac(adapter, mac);
4259 if (status)
4260 return status;
4261
4262 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4263 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4264 }
4265
95046b92
SP
4266 return 0;
4267}
4268
68d7bdcb
SP
4269static void be_schedule_worker(struct be_adapter *adapter)
4270{
4271 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4272 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4273}
4274
972f37b4 4275static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
eb7dd46c
SP
4276{
4277 schedule_delayed_work(&adapter->be_err_detection_work,
972f37b4 4278 msecs_to_jiffies(delay));
eb7dd46c
SP
4279 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4280}
4281
7707133c 4282static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4283{
68d7bdcb 4284 struct net_device *netdev = adapter->netdev;
10ef9ab4 4285 int status;
ba343c77 4286
7707133c 4287 status = be_evt_queues_create(adapter);
abb93951
PR
4288 if (status)
4289 goto err;
73d540f2 4290
7707133c 4291 status = be_tx_qs_create(adapter);
c2bba3df
SK
4292 if (status)
4293 goto err;
10ef9ab4 4294
7707133c 4295 status = be_rx_cqs_create(adapter);
10ef9ab4 4296 if (status)
a54769f5 4297 goto err;
6b7c5b94 4298
7707133c 4299 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4300 if (status)
4301 goto err;
4302
68d7bdcb
SP
4303 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4304 if (status)
4305 goto err;
4306
4307 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4308 if (status)
4309 goto err;
4310
7707133c
SP
4311 return 0;
4312err:
4313 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4314 return status;
4315}
4316
62219066
AK
4317static int be_if_create(struct be_adapter *adapter)
4318{
4319 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4320 u32 cap_flags = be_if_cap_flags(adapter);
4321 int status;
4322
4323 if (adapter->cfg_num_qs == 1)
4324 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4325
4326 en_flags &= cap_flags;
4327 /* will enable all the needed filter flags in be_open() */
4328 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4329 &adapter->if_handle, 0);
4330
4331 return status;
4332}
4333
68d7bdcb
SP
4334int be_update_queues(struct be_adapter *adapter)
4335{
4336 struct net_device *netdev = adapter->netdev;
4337 int status;
4338
4339 if (netif_running(netdev))
4340 be_close(netdev);
4341
4342 be_cancel_worker(adapter);
4343
4344 /* If any vectors have been shared with RoCE we cannot re-program
4345 * the MSIx table.
4346 */
4347 if (!adapter->num_msix_roce_vec)
4348 be_msix_disable(adapter);
4349
4350 be_clear_queues(adapter);
62219066
AK
4351 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4352 if (status)
4353 return status;
68d7bdcb
SP
4354
4355 if (!msix_enabled(adapter)) {
4356 status = be_msix_enable(adapter);
4357 if (status)
4358 return status;
4359 }
4360
62219066
AK
4361 status = be_if_create(adapter);
4362 if (status)
4363 return status;
4364
68d7bdcb
SP
4365 status = be_setup_queues(adapter);
4366 if (status)
4367 return status;
4368
4369 be_schedule_worker(adapter);
4370
4371 if (netif_running(netdev))
4372 status = be_open(netdev);
4373
4374 return status;
4375}
4376
f7062ee5
SP
4377static inline int fw_major_num(const char *fw_ver)
4378{
4379 int fw_major = 0, i;
4380
4381 i = sscanf(fw_ver, "%d.", &fw_major);
4382 if (i != 1)
4383 return 0;
4384
4385 return fw_major;
4386}
4387
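/* Illustrative sketch (not from the driver): fw_major_num() parsing.
 * sscanf() stops at the first '.', so a dotted version string (the
 * "4.9.317.0" below is made up) yields its leading major number.
 */
#include <stdio.h>

int main(void)
{
	const char *fw_ver = "4.9.317.0";
	int fw_major = 0;

	if (sscanf(fw_ver, "%d.", &fw_major) != 1)
		fw_major = 0;
	printf("major: %d\n", fw_major);	/* -> 4 */
	return 0;
}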
f962f840
SP
4388/* If any VFs are already enabled don't FLR the PF */
4389static bool be_reset_required(struct be_adapter *adapter)
4390{
4391 return pci_num_vf(adapter->pdev) ? false : true;
4392}
4393
4394/* Wait for the FW to be ready and perform the required initialization */
4395static int be_func_init(struct be_adapter *adapter)
4396{
4397 int status;
4398
4399 status = be_fw_wait_ready(adapter);
4400 if (status)
4401 return status;
4402
4403 if (be_reset_required(adapter)) {
4404 status = be_cmd_reset_function(adapter);
4405 if (status)
4406 return status;
4407
4408 /* Wait for interrupts to quiesce after an FLR */
4409 msleep(100);
4410
4411 /* We can clear all errors when function reset succeeds */
954f6825 4412 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4413 }
4414
4415 /* Tell FW we're ready to fire cmds */
4416 status = be_cmd_fw_init(adapter);
4417 if (status)
4418 return status;
4419
4420 /* Allow interrupts for other ULPs running on NIC function */
4421 be_intr_set(adapter, true);
4422
4423 return 0;
4424}
4425
7707133c
SP
4426static int be_setup(struct be_adapter *adapter)
4427{
4428 struct device *dev = &adapter->pdev->dev;
7707133c
SP
4429 int status;
4430
f962f840
SP
4431 status = be_func_init(adapter);
4432 if (status)
4433 return status;
4434
7707133c
SP
4435 be_setup_init(adapter);
4436
4437 if (!lancer_chip(adapter))
4438 be_cmd_req_native_mode(adapter);
4439
980df249
SR
4440 /* invoke this cmd first to get pf_num and vf_num which are needed
4441 * for issuing profile related cmds
4442 */
4443 if (!BEx_chip(adapter)) {
4444 status = be_cmd_get_func_config(adapter, NULL);
4445 if (status)
4446 return status;
4447 }
72ef3a88 4448
ace40aff
VV
4449 if (!BE2_chip(adapter) && be_physfn(adapter))
4450 be_alloc_sriov_res(adapter);
4451
7707133c 4452 status = be_get_config(adapter);
10ef9ab4 4453 if (status)
a54769f5 4454 goto err;
6b7c5b94 4455
7707133c 4456 status = be_msix_enable(adapter);
10ef9ab4 4457 if (status)
a54769f5 4458 goto err;
6b7c5b94 4459
bcc84140 4460 /* will enable all the needed filter flags in be_open() */
62219066 4461 status = be_if_create(adapter);
7707133c 4462 if (status)
a54769f5 4463 goto err;
6b7c5b94 4464
68d7bdcb
SP
4465 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4466 rtnl_lock();
7707133c 4467 status = be_setup_queues(adapter);
68d7bdcb 4468 rtnl_unlock();
95046b92 4469 if (status)
1578e777
PR
4470 goto err;
4471
7707133c 4472 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4473
4474 status = be_mac_setup(adapter);
10ef9ab4
SP
4475 if (status)
4476 goto err;
4477
e97e3cda 4478 be_cmd_get_fw_ver(adapter);
acbafeb1 4479 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4480
e9e2a904 4481 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4482 dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
e9e2a904
SK
4483 adapter->fw_ver);
4484 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4485 }
4486
00d594c3
KA
4487 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4488 adapter->rx_fc);
4489 if (status)
4490 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4491 &adapter->rx_fc);
590c391d 4492
00d594c3
KA
4493 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4494 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4495
bdce2ad7
SR
4496 if (be_physfn(adapter))
4497 be_cmd_set_logical_link_config(adapter,
4498 IFLA_VF_LINK_STATE_AUTO, 0);
4499
bec84e6b
VV
4500 if (adapter->num_vfs)
4501 be_vf_setup(adapter);
f9449ab7 4502
f25b119c
PR
4503 status = be_cmd_get_phy_info(adapter);
4504 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4505 adapter->phy.fc_autoneg = 1;
4506
68d7bdcb 4507 be_schedule_worker(adapter);
e1ad8e33 4508 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4509 return 0;
a54769f5
SP
4510err:
4511 be_clear(adapter);
4512 return status;
4513}
6b7c5b94 4514
66268739
IV
4515#ifdef CONFIG_NET_POLL_CONTROLLER
4516static void be_netpoll(struct net_device *netdev)
4517{
4518 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4519 struct be_eq_obj *eqo;
66268739
IV
4520 int i;
4521
e49cc34f 4522 for_all_evt_queues(adapter, eqo, i) {
20947770 4523 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4524 napi_schedule(&eqo->napi);
4525 }
66268739
IV
4526}
4527#endif
4528
485bf569
SN
4529int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4530{
4531 const struct firmware *fw;
4532 int status;
4533
4534 if (!netif_running(adapter->netdev)) {
4535 dev_err(&adapter->pdev->dev,
4536 "Firmware load not allowed (interface is down)\n");
940a3fcd 4537 return -ENETDOWN;
485bf569
SN
4538 }
4539
4540 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4541 if (status)
4542 goto fw_exit;
4543
4544 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4545
4546 if (lancer_chip(adapter))
4547 status = lancer_fw_download(adapter, fw);
4548 else
4549 status = be_fw_download(adapter, fw);
4550
eeb65ced 4551 if (!status)
e97e3cda 4552 be_cmd_get_fw_ver(adapter);
eeb65ced 4553
84517482
AK
4554fw_exit:
4555 release_firmware(fw);
4556 return status;
4557}
4558
add511b3
RP
4559static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4560 u16 flags)
a77dcb8c
AK
4561{
4562 struct be_adapter *adapter = netdev_priv(dev);
4563 struct nlattr *attr, *br_spec;
4564 int rem;
4565 int status = 0;
4566 u16 mode = 0;
4567
4568 if (!sriov_enabled(adapter))
4569 return -EOPNOTSUPP;
4570
4571 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4ea85e83
TG
4572 if (!br_spec)
4573 return -EINVAL;
a77dcb8c
AK
4574
4575 nla_for_each_nested(attr, br_spec, rem) {
4576 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4577 continue;
4578
b7c1a314
TG
4579 if (nla_len(attr) < sizeof(mode))
4580 return -EINVAL;
4581
a77dcb8c 4582 mode = nla_get_u16(attr);
ac0f5fba
SR
4583 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4584 return -EOPNOTSUPP;
4585
a77dcb8c
AK
4586 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4587 return -EINVAL;
4588
4589 status = be_cmd_set_hsw_config(adapter, 0, 0,
4590 adapter->if_handle,
4591 mode == BRIDGE_MODE_VEPA ?
4592 PORT_FWD_TYPE_VEPA :
e7bcbd7b 4593 PORT_FWD_TYPE_VEB, 0);
a77dcb8c
AK
4594 if (status)
4595 goto err;
4596
4597 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4598 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4599
4600 return status;
4601 }
4602err:
4603 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4604 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4605
4606 return status;
4607}
4608
4609static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
46c264da
ND
4610 struct net_device *dev, u32 filter_mask,
4611 int nlflags)
a77dcb8c
AK
4612{
4613 struct be_adapter *adapter = netdev_priv(dev);
4614 int status = 0;
4615 u8 hsw_mode;
4616
a77dcb8c
AK
4617 /* BE and Lancer chips support VEB mode only */
4618 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4619 hsw_mode = PORT_FWD_TYPE_VEB;
4620 } else {
4621 status = be_cmd_get_hsw_config(adapter, NULL, 0,
e7bcbd7b
KA
4622 adapter->if_handle, &hsw_mode,
4623 NULL);
a77dcb8c
AK
4624 if (status)
4625 return 0;
ff9ed19d
KP
4626
4627 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4628 return 0;
a77dcb8c
AK
4629 }
4630
4631 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4632 hsw_mode == PORT_FWD_TYPE_VEPA ?
2c3c031c 4633 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
7d4f8d87 4634 0, 0, nlflags, filter_mask, NULL);
a77dcb8c
AK
4635}
4636
c5abe7c0 4637#ifdef CONFIG_BE2NET_VXLAN
630f4b70
SB
4638/* VxLAN offload Notes:
4639 *
4640 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4641 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4642 * is expected to work across all types of IP tunnels once exported. Skyhawk
4643 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
16dde0d6
SB
4644 * offloads in hw_enc_features only when a VxLAN port is added. If other (non-
4645 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4646 * those other tunnels are unexported on the fly through ndo_features_check().
630f4b70
SB
4647 *
4648 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4649 * adds more than one port, disable offloads and don't re-enable them again
4650 * until after all the tunnels are removed.
4651 */
c9c47142
SP
4652static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4653 __be16 port)
4654{
4655 struct be_adapter *adapter = netdev_priv(netdev);
4656 struct device *dev = &adapter->pdev->dev;
4657 int status;
4658
af19e686 4659 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4660 return;
4661
1e5b311a
JB
4662 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4663 adapter->vxlan_port_aliases++;
4664 return;
4665 }
4666
c9c47142 4667 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
c9c47142
SP
4668 dev_info(dev,
4669 "Only one UDP port supported for VxLAN offloads\n");
630f4b70
SB
4670 dev_info(dev, "Disabling VxLAN offloads\n");
4671 adapter->vxlan_port_count++;
4672 goto err;
c9c47142
SP
4673 }
4674
630f4b70
SB
4675 if (adapter->vxlan_port_count++ >= 1)
4676 return;
4677
c9c47142
SP
4678 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4679 OP_CONVERT_NORMAL_TO_TUNNEL);
4680 if (status) {
4681 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4682 goto err;
4683 }
4684
4685 status = be_cmd_set_vxlan_port(adapter, port);
4686 if (status) {
4687 dev_warn(dev, "Failed to add VxLAN port\n");
4688 goto err;
4689 }
4690 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4691 adapter->vxlan_port = port;
4692
630f4b70
SB
4693 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4694 NETIF_F_TSO | NETIF_F_TSO6 |
4695 NETIF_F_GSO_UDP_TUNNEL;
4696 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
ac9a3d84 4697 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
630f4b70 4698
c9c47142
SP
4699 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4700 be16_to_cpu(port));
4701 return;
4702err:
4703 be_disable_vxlan_offloads(adapter);
c9c47142
SP
4704}
4705
4706static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4707 __be16 port)
4708{
4709 struct be_adapter *adapter = netdev_priv(netdev);
4710
af19e686 4711 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
c9c47142
SP
4712 return;
4713
4714 if (adapter->vxlan_port != port)
630f4b70 4715 goto done;
c9c47142 4716
1e5b311a
JB
4717 if (adapter->vxlan_port_aliases) {
4718 adapter->vxlan_port_aliases--;
4719 return;
4720 }
4721
c9c47142
SP
4722 be_disable_vxlan_offloads(adapter);
4723
4724 dev_info(&adapter->pdev->dev,
4725 "Disabled VxLAN offloads for UDP port %d\n",
4726 be16_to_cpu(port));
630f4b70
SB
4727done:
4728 adapter->vxlan_port_count--;
c9c47142 4729}
725d548f 4730
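/* Illustrative sketch (not from the driver): the single-UDP-port rule the
 * add/del handlers above implement. Re-adding the active port only bumps
 * an alias count; adding a second, different port disables offloads.
 */
#include <stdio.h>

struct demo_vxlan_state {
	unsigned short port;	/* active offloaded UDP port, 0 if none */
	int port_count;		/* ports the stack has added */
	int port_aliases;	/* duplicate adds of the active port */
	int offloads_on;
};

static void demo_add_port(struct demo_vxlan_state *s, unsigned short port)
{
	if (s->port == port && s->port_count) {
		s->port_aliases++;
		return;
	}
	if (s->port_count++) {		/* a second, different port */
		s->offloads_on = 0;
		return;
	}
	s->port = port;
	s->offloads_on = 1;
}

int main(void)
{
	struct demo_vxlan_state s = { 0 };

	demo_add_port(&s, 4789);
	demo_add_port(&s, 8472);	/* different port: offloads go off */
	printf("offloads: %s\n", s.offloads_on ? "on" : "off");
	return 0;
}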
5f35227e
JG
4731static netdev_features_t be_features_check(struct sk_buff *skb,
4732 struct net_device *dev,
4733 netdev_features_t features)
725d548f 4734{
16dde0d6
SB
4735 struct be_adapter *adapter = netdev_priv(dev);
4736 u8 l4_hdr = 0;
4737
4738 /* The code below restricts offload features for some tunneled packets.
4739 * Offload features for normal (non tunnel) packets are unchanged.
4740 */
4741 if (!skb->encapsulation ||
4742 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4743 return features;
4744
4745 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4746 * should disable tunnel offload features if it's not a VxLAN packet,
4747 * as tunnel offloads have been enabled only for VxLAN. This is done to
4748 * allow other tunneled traffic like GRE to work fine while VxLAN
4749 * offloads are configured in Skyhawk-R.
4750 */
4751 switch (vlan_get_protocol(skb)) {
4752 case htons(ETH_P_IP):
4753 l4_hdr = ip_hdr(skb)->protocol;
4754 break;
4755 case htons(ETH_P_IPV6):
4756 l4_hdr = ipv6_hdr(skb)->nexthdr;
4757 break;
4758 default:
4759 return features;
4760 }
4761
4762 if (l4_hdr != IPPROTO_UDP ||
4763 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4764 skb->inner_protocol != htons(ETH_P_TEB) ||
4765 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4766 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
a188222b 4767 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
16dde0d6
SB
4768
4769 return features;
725d548f 4770}
c5abe7c0 4771#endif
c9c47142 4772
a155a5db
SB
4773static int be_get_phys_port_id(struct net_device *dev,
4774 struct netdev_phys_item_id *ppid)
4775{
4776 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4777 struct be_adapter *adapter = netdev_priv(dev);
4778 u8 *id;
4779
4780 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4781 return -ENOSPC;
4782
4783 ppid->id[0] = adapter->hba_port_num + 1;
4784 id = &ppid->id[1];
4785 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4786 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4787 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4788
4789 ppid->id_len = id_len;
4790
4791 return 0;
4792}
4793
e5686ad8 4794static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
4795 .ndo_open = be_open,
4796 .ndo_stop = be_close,
4797 .ndo_start_xmit = be_xmit,
a54769f5 4798 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
4799 .ndo_set_mac_address = be_mac_addr_set,
4800 .ndo_change_mtu = be_change_mtu,
ab1594e9 4801 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 4802 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
4803 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4804 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 4805 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 4806 .ndo_set_vf_vlan = be_set_vf_vlan,
ed616689 4807 .ndo_set_vf_rate = be_set_vf_tx_rate,
66268739 4808 .ndo_get_vf_config = be_get_vf_config,
bdce2ad7 4809 .ndo_set_vf_link_state = be_set_vf_link_state,
e7bcbd7b 4810 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
66268739
IV
4811#ifdef CONFIG_NET_POLL_CONTROLLER
4812 .ndo_poll_controller = be_netpoll,
4813#endif
a77dcb8c
AK
4814 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4815 .ndo_bridge_getlink = be_ndo_bridge_getlink,
6384a4d0 4816#ifdef CONFIG_NET_RX_BUSY_POLL
c9c47142 4817 .ndo_busy_poll = be_busy_poll,
6384a4d0 4818#endif
c5abe7c0 4819#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
4820 .ndo_add_vxlan_port = be_add_vxlan_port,
4821 .ndo_del_vxlan_port = be_del_vxlan_port,
5f35227e 4822 .ndo_features_check = be_features_check,
c5abe7c0 4823#endif
a155a5db 4824 .ndo_get_phys_port_id = be_get_phys_port_id,
6b7c5b94
SP
4825};
4826
4827static void be_netdev_init(struct net_device *netdev)
4828{
4829 struct be_adapter *adapter = netdev_priv(netdev);
4830
6332c8d3 4831 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 4832 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 4833 NETIF_F_HW_VLAN_CTAG_TX;
62219066 4834 if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
8b8ddc68 4835 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
4836
4837 netdev->features |= netdev->hw_features |
f646968f 4838 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 4839
eb8a50d9 4840 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 4841 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 4842
fbc13f01
AK
4843 netdev->priv_flags |= IFF_UNICAST_FLT;
4844
6b7c5b94
SP
4845 netdev->flags |= IFF_MULTICAST;
4846
b7e5887e 4847 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 4848
10ef9ab4 4849 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94 4850
7ad24ea4 4851 netdev->ethtool_ops = &be_ethtool_ops;
6b7c5b94
SP
4852}
4853
87ac1a52
KA
4854static void be_cleanup(struct be_adapter *adapter)
4855{
4856 struct net_device *netdev = adapter->netdev;
4857
4858 rtnl_lock();
4859 netif_device_detach(netdev);
4860 if (netif_running(netdev))
4861 be_close(netdev);
4862 rtnl_unlock();
4863
4864 be_clear(adapter);
4865}
4866
484d76fd 4867static int be_resume(struct be_adapter *adapter)
78fad34e 4868{
d0e1b319 4869 struct net_device *netdev = adapter->netdev;
78fad34e
SP
4870 int status;
4871
78fad34e
SP
4872 status = be_setup(adapter);
4873 if (status)
484d76fd 4874 return status;
78fad34e 4875
d0e1b319
KA
4876 if (netif_running(netdev)) {
4877 status = be_open(netdev);
78fad34e 4878 if (status)
484d76fd 4879 return status;
78fad34e
SP
4880 }
4881
d0e1b319
KA
4882 netif_device_attach(netdev);
4883
484d76fd
KA
4884 return 0;
4885}
4886
4887static int be_err_recover(struct be_adapter *adapter)
4888{
484d76fd
KA
4889 int status;
4890
1babbad4
PR
4891 /* Error recovery is supported only on Lancer as of now */
4892 if (!lancer_chip(adapter))
4893 return -EIO;
4894
4895 /* Wait for adapter to reach quiescent state before
4896 * destroying queues
4897 */
4898 status = be_fw_wait_ready(adapter);
4899 if (status)
4900 goto err;
4901
4902 be_cleanup(adapter);
4903
484d76fd
KA
4904 status = be_resume(adapter);
4905 if (status)
4906 goto err;
4907
78fad34e
SP
4908 return 0;
4909err:
78fad34e
SP
4910 return status;
4911}
4912
eb7dd46c 4913static void be_err_detection_task(struct work_struct *work)
78fad34e
SP
4914{
4915 struct be_adapter *adapter =
eb7dd46c
SP
4916 container_of(work, struct be_adapter,
4917 be_err_detection_work.work);
1babbad4
PR
4918 struct device *dev = &adapter->pdev->dev;
4919 int recovery_status;
972f37b4 4920 int delay = ERR_DETECTION_DELAY;
78fad34e
SP
4921
4922 be_detect_error(adapter);
4923
1babbad4
PR
4924 if (be_check_error(adapter, BE_ERROR_HW))
4925 recovery_status = be_err_recover(adapter);
4926 else
4927 goto reschedule_task;
4928
4929 if (!recovery_status) {
972f37b4 4930 adapter->recovery_retries = 0;
1babbad4
PR
4931 dev_info(dev, "Adapter recovery successful\n");
4932 goto reschedule_task;
4933 } else if (be_virtfn(adapter)) {
4934 /* For VFs, check every second whether the PF has
4935 * allocated resources.
4936 */
4937 dev_err(dev, "Re-trying adapter recovery\n");
4938 goto reschedule_task;
972f37b4
PR
4939 } else if (adapter->recovery_retries++ <
4940 MAX_ERR_RECOVERY_RETRY_COUNT) {
4941 /* In case of another error during recovery, it takes 30 sec
4942 * for adapter to come out of error. Retry error recovery after
4943 * this time interval.
4944 */
4945 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
4946 delay = ERR_RECOVERY_RETRY_DELAY;
4947 goto reschedule_task;
1babbad4
PR
4948 } else {
4949 dev_err(dev, "Adapter recovery failed\n");
78fad34e
SP
4950 }
4951
1babbad4
PR
4952 return;
4953reschedule_task:
972f37b4 4954 be_schedule_err_detection(adapter, delay);
78fad34e
SP
4955}
4956
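/* Illustrative sketch (not from the driver): the retry policy above. A
 * successful recovery resets the retry counter; a failed one is retried
 * with a longer delay until the retry budget (counts and delays below
 * are assumed values) is exhausted.
 */
#include <stdio.h>

#define DEMO_MAX_RETRIES	3
#define DEMO_DETECT_DELAY_MS	1000
#define DEMO_RETRY_DELAY_MS	30000

int main(void)
{
	int retries = 0, recovered = 0;
	int delay = DEMO_DETECT_DELAY_MS;

	while (!recovered && retries < DEMO_MAX_RETRIES) {
		recovered = (retries == 2);	/* pretend: fail twice, then succeed */
		if (!recovered) {
			retries++;
			delay = DEMO_RETRY_DELAY_MS;
			printf("retry %d in %d ms\n", retries, delay);
		}
	}
	printf(recovered ? "recovered\n" : "gave up\n");
	return 0;
}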
4957static void be_log_sfp_info(struct be_adapter *adapter)
4958{
4959 int status;
4960
4961 status = be_cmd_query_sfp_info(adapter);
4962 if (!status) {
4963 dev_err(&adapter->pdev->dev,
4964 "Unqualified SFP+ detected on %c from %s part no: %s",
4965 adapter->port_name, adapter->phy.vendor_name,
4966 adapter->phy.vendor_pn);
4967 }
4968 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
4969}
4970
4971static void be_worker(struct work_struct *work)
4972{
4973 struct be_adapter *adapter =
4974 container_of(work, struct be_adapter, work.work);
4975 struct be_rx_obj *rxo;
4976 int i;
4977
4978 /* when interrupts are not yet enabled, just reap any pending
4979 * mcc completions
4980 */
4981 if (!netif_running(adapter->netdev)) {
4982 local_bh_disable();
4983 be_process_mcc(adapter);
4984 local_bh_enable();
4985 goto reschedule;
4986 }
4987
4988 if (!adapter->stats_cmd_sent) {
4989 if (lancer_chip(adapter))
4990 lancer_cmd_get_pport_stats(adapter,
4991 &adapter->stats_cmd);
4992 else
4993 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4994 }
4995
4996 if (be_physfn(adapter) &&
4997 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4998 be_cmd_get_die_temperature(adapter);
4999
5000 for_all_rx_queues(adapter, rxo, i) {
5001 /* Replenish RX-queues starved due to memory
5002 * allocation failures.
5003 */
5004 if (rxo->rx_post_starved)
5005 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5006 }
5007
20947770
PR
5008 /* EQ-delay update for Skyhawk is done while notifying EQ */
5009 if (!skyhawk_chip(adapter))
5010 be_eqd_update(adapter, false);
78fad34e
SP
5011
5012 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
5013 be_log_sfp_info(adapter);
5014
5015reschedule:
5016 adapter->work_counter++;
5017 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5018}
5019
6b7c5b94
SP
5020static void be_unmap_pci_bars(struct be_adapter *adapter)
5021{
c5b3ad4c
SP
5022 if (adapter->csr)
5023 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 5024 if (adapter->db)
ce66f781 5025 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
5026}
5027
ce66f781
SP
5028static int db_bar(struct be_adapter *adapter)
5029{
18c57c74 5030 if (lancer_chip(adapter) || be_virtfn(adapter))
ce66f781
SP
5031 return 0;
5032 else
5033 return 4;
5034}
5035
5036static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 5037{
dbf0f2a7 5038 if (skyhawk_chip(adapter)) {
ce66f781
SP
5039 adapter->roce_db.size = 4096;
5040 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5041 db_bar(adapter));
5042 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5043 db_bar(adapter));
5044 }
045508a8 5045 return 0;
6b7c5b94
SP
5046}
5047
5048static int be_map_pci_bars(struct be_adapter *adapter)
5049{
0fa74a4b 5050 struct pci_dev *pdev = adapter->pdev;
6b7c5b94 5051 u8 __iomem *addr;
78fad34e
SP
5052 u32 sli_intf;
5053
5054 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5055 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5056 SLI_INTF_FAMILY_SHIFT;
5057 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
fe6d2a38 5058
c5b3ad4c 5059 if (BEx_chip(adapter) && be_physfn(adapter)) {
0fa74a4b 5060 adapter->csr = pci_iomap(pdev, 2, 0);
ddf1169f 5061 if (!adapter->csr)
c5b3ad4c
SP
5062 return -ENOMEM;
5063 }
5064
25848c90 5065 addr = pci_iomap(pdev, db_bar(adapter), 0);
ddf1169f 5066 if (!addr)
6b7c5b94 5067 goto pci_map_err;
ba343c77 5068 adapter->db = addr;
ce66f781 5069
5070 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5071 if (be_physfn(adapter)) {
5072 /* PCICFG is the 2nd BAR in BE2 */
5073 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5074 if (!addr)
5075 goto pci_map_err;
5076 adapter->pcicfg = addr;
5077 } else {
5078 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5079 }
5080 }
5081
ce66f781 5082 be_roce_map_pci_bars(adapter);
6b7c5b94 5083 return 0;
ce66f781 5084
6b7c5b94 5085pci_map_err:
25848c90 5086 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5087 be_unmap_pci_bars(adapter);
5088 return -ENOMEM;
5089}
5090
78fad34e 5091static void be_drv_cleanup(struct be_adapter *adapter)
6b7c5b94 5092{
8788fdc2 5093 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
78fad34e 5094 struct device *dev = &adapter->pdev->dev;
5095
5096 if (mem->va)
78fad34e 5097 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
e7b909a6 5098
5b8821b7 5099 mem = &adapter->rx_filter;
e7b909a6 5100 if (mem->va)
5101 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5102
5103 mem = &adapter->stats_cmd;
5104 if (mem->va)
5105 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5106}
5107
5108/* Allocate and initialize various fields in be_adapter struct */
5109static int be_drv_init(struct be_adapter *adapter)
6b7c5b94 5110{
5111 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5112 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 5113 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5114 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5115 struct device *dev = &adapter->pdev->dev;
5116 int status = 0;
5117
5118 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5119 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5120 &mbox_mem_alloc->dma,
5121 GFP_KERNEL);
5122 if (!mbox_mem_alloc->va)
5123 return -ENOMEM;
5124
5125 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5126 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5127 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
e7b909a6 5128
5b8821b7 5129 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5130 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5131 &rx_filter->dma, GFP_KERNEL);
ddf1169f 5132 if (!rx_filter->va) {
5133 status = -ENOMEM;
5134 goto free_mbox;
5135 }
1f9061d2 5136
5137 if (lancer_chip(adapter))
5138 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5139 else if (BE2_chip(adapter))
5140 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5141 else if (BE3_chip(adapter))
5142 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5143 else
5144 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5145 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5146 &stats_cmd->dma, GFP_KERNEL);
5147 if (!stats_cmd->va) {
5148 status = -ENOMEM;
5149 goto free_rx_filter;
5150 }
5151
2984961c 5152 mutex_init(&adapter->mbox_lock);
5153 spin_lock_init(&adapter->mcc_lock);
5154 spin_lock_init(&adapter->mcc_cq_lock);
5eeff635 5155 init_completion(&adapter->et_cmd_compl);
e7b909a6 5156
78fad34e 5157 pci_save_state(adapter->pdev);
6b7c5b94 5158
78fad34e 5159 INIT_DELAYED_WORK(&adapter->work, be_worker);
5160 INIT_DELAYED_WORK(&adapter->be_err_detection_work,
5161 be_err_detection_task);
6b7c5b94 5162
5163 adapter->rx_fc = true;
5164 adapter->tx_fc = true;
6b7c5b94 5165
5166 /* Must be a power of 2 or else MODULO will BUG_ON */
5167 adapter->be_get_temp_freq = 64;
ca34fe38 5168
6b7c5b94 5169 return 0;
5170
5171free_rx_filter:
5172 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5173free_mbox:
5174 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5175 mbox_mem_alloc->dma);
5176 return status;
5177}
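
be_drv_init() above over-allocates the mailbox by 16 bytes and keeps two views of it: mbox_mem_alloced holds the raw allocation for the eventual dma_free_coherent(), while mbox_mem is the PTR_ALIGN'ed, 16-byte-aligned view. A standalone userspace sketch of that over-allocate-and-align idiom (helper names are hypothetical):

#include <stdint.h>
#include <stdlib.h>

#define ALIGN_UP(p, a)	((void *)(((uintptr_t)(p) + (a) - 1) & ~(uintptr_t)((a) - 1)))

/* Returns a 16-byte-aligned pointer into a block of at least 'size'
 * bytes; '*raw' receives the original allocation, which is what must
 * be passed to free() (mirroring mbox_mem_alloced vs. mbox_mem).
 */
static void *alloc_aligned16(size_t size, void **raw)
{
	*raw = malloc(size + 16);
	return *raw ? ALIGN_UP(*raw, 16) : NULL;
}

int main(void)
{
	void *raw;
	void *aligned = alloc_aligned16(64, &raw);

	free(raw);	/* free the raw pointer, never the aligned view */
	return aligned ? 0 : 1;
}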
5178
3bc6b06c 5179static void be_remove(struct pci_dev *pdev)
5180{
5181 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 5182
5183 if (!adapter)
5184 return;
5185
045508a8 5186 be_roce_dev_remove(adapter);
8cef7a78 5187 be_intr_set(adapter, false);
045508a8 5188
eb7dd46c 5189 be_cancel_err_detection(adapter);
f67ef7ba 5190
5191 unregister_netdev(adapter->netdev);
5192
5193 be_clear(adapter);
5194
5195 /* tell fw we're done with firing cmds */
5196 be_cmd_fw_clean(adapter);
5197
5198 be_unmap_pci_bars(adapter);
5199 be_drv_cleanup(adapter);
6b7c5b94 5200
5201 pci_disable_pcie_error_reporting(pdev);
5202
5203 pci_release_regions(pdev);
5204 pci_disable_device(pdev);
5205
5206 free_netdev(adapter->netdev);
5207}
5208
5209static ssize_t be_hwmon_show_temp(struct device *dev,
5210 struct device_attribute *dev_attr,
5211 char *buf)
5212{
5213 struct be_adapter *adapter = dev_get_drvdata(dev);
5214
5215 /* Unit: millidegree Celsius */
5216 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5217 return -EIO;
5218 else
5219 return sprintf(buf, "%u\n",
5220 adapter->hwmon_info.be_on_die_temp * 1000);
5221}
5222
5223static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5224 be_hwmon_show_temp, NULL, 1);
5225
5226static struct attribute *be_hwmon_attrs[] = {
5227 &sensor_dev_attr_temp1_input.dev_attr.attr,
5228 NULL
5229};
5230
5231ATTRIBUTE_GROUPS(be_hwmon);
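
Per the hwmon convention, be_hwmon_show_temp() above reports the on-die temperature in millidegrees Celsius, so a userspace reader divides by 1000. A hypothetical reader follows; the sysfs path is an example, not taken from the source:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long mdeg;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &mdeg) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	/* e.g. 47000 prints as "47.000 C" */
	printf("on-die temperature: %ld.%03ld C\n", mdeg / 1000, mdeg % 1000);
	return 0;
}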
5232
5233static char *mc_name(struct be_adapter *adapter)
5234{
5235 char *str = ""; /* default */
5236
5237 switch (adapter->mc_type) {
5238 case UMC:
5239 str = "UMC";
5240 break;
5241 case FLEX10:
5242 str = "FLEX10";
5243 break;
5244 case vNIC1:
5245 str = "vNIC-1";
5246 break;
5247 case nPAR:
5248 str = "nPAR";
5249 break;
5250 case UFP:
5251 str = "UFP";
5252 break;
5253 case vNIC2:
5254 str = "vNIC-2";
5255 break;
5256 default:
5257 str = "";
5258 }
5259
5260 return str;
5261}
5262
5263static inline char *func_name(struct be_adapter *adapter)
5264{
5265 return be_physfn(adapter) ? "PF" : "VF";
5266}
5267
5268static inline char *nic_name(struct pci_dev *pdev)
5269{
5270 switch (pdev->device) {
5271 case OC_DEVICE_ID1:
5272 return OC_NAME;
5273 case OC_DEVICE_ID2:
5274 return OC_NAME_BE;
5275 case OC_DEVICE_ID3:
5276 case OC_DEVICE_ID4:
5277 return OC_NAME_LANCER;
5278 case BE_DEVICE_ID2:
5279 return BE3_NAME;
5280 case OC_DEVICE_ID5:
5281 case OC_DEVICE_ID6:
5282 return OC_NAME_SH;
5283 default:
5284 return BE_NAME;
5285 }
5286}
5287
1dd06ae8 5288static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94 5289{
5290 struct be_adapter *adapter;
5291 struct net_device *netdev;
21252377 5292 int status = 0;
6b7c5b94 5293
5294 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5295
5296 status = pci_enable_device(pdev);
5297 if (status)
5298 goto do_none;
5299
5300 status = pci_request_regions(pdev, DRV_NAME);
5301 if (status)
5302 goto disable_dev;
5303 pci_set_master(pdev);
5304
7f640062 5305 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
ddf1169f 5306 if (!netdev) {
5307 status = -ENOMEM;
5308 goto rel_reg;
5309 }
5310 adapter = netdev_priv(netdev);
5311 adapter->pdev = pdev;
5312 pci_set_drvdata(pdev, adapter);
5313 adapter->netdev = netdev;
2243e2e9 5314 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 5315
4c15c243 5316 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5317 if (!status) {
5318 netdev->features |= NETIF_F_HIGHDMA;
5319 } else {
4c15c243 5320 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5321 if (status) {
5322 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5323 goto free_netdev;
5324 }
5325 }
5326
5327 status = pci_enable_pcie_error_reporting(pdev);
5328 if (!status)
5329 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
d6b6d987 5330
78fad34e 5331 status = be_map_pci_bars(adapter);
6b7c5b94 5332 if (status)
39f1d94d 5333 goto free_netdev;
6b7c5b94 5334
5335 status = be_drv_init(adapter);
5336 if (status)
5337 goto unmap_bars;
5338
5339 status = be_setup(adapter);
5340 if (status)
78fad34e 5341 goto drv_cleanup;
2243e2e9 5342
3abcdeda 5343 be_netdev_init(netdev);
5344 status = register_netdev(netdev);
5345 if (status != 0)
5fb379ee 5346 goto unsetup;
6b7c5b94 5347
5348 be_roce_dev_add(adapter);
5349
972f37b4 5350 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
b4e32a71 5351
29e9122b 5352 /* On-die temperature is not supported on VFs */
9a03259c 5353 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5354 adapter->hwmon_info.hwmon_dev =
5355 devm_hwmon_device_register_with_groups(&pdev->dev,
5356 DRV_NAME,
5357 adapter,
5358 be_hwmon_groups);
5359 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5360 }
5361
d379142b 5362 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
21252377 5363 func_name(adapter), mc_name(adapter), adapter->port_name);
34b1ef04 5364
5365 return 0;
5366
5367unsetup:
5368 be_clear(adapter);
5369drv_cleanup:
5370 be_drv_cleanup(adapter);
5371unmap_bars:
5372 be_unmap_pci_bars(adapter);
f9449ab7 5373free_netdev:
fe6d2a38 5374 free_netdev(netdev);
5375rel_reg:
5376 pci_release_regions(pdev);
5377disable_dev:
5378 pci_disable_device(pdev);
5379do_none:
c4ca2374 5380 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5381 return status;
5382}
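
be_probe() above follows the kernel's goto-unwind convention: each failed acquisition jumps to the label that releases exactly what has been acquired so far, in reverse order. A minimal self-contained sketch of that pattern (all names are illustrative stand-ins, e.g. acquire_regions() for pci_request_regions()):

#include <stdio.h>

static int acquire_regions(void) { return 0; }
static int map_bars(void) { return -1; }	/* simulate a failure */
static void release_regions(void) { }

static int demo_probe(void)
{
	int status;

	status = acquire_regions();
	if (status)
		goto do_none;

	status = map_bars();
	if (status)
		goto rel_regions;	/* undo only what succeeded */

	return 0;

rel_regions:
	release_regions();
do_none:
	fprintf(stderr, "probe failed: %d\n", status);
	return status;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}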
5383
5384static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5385{
5386 struct be_adapter *adapter = pci_get_drvdata(pdev);
6b7c5b94 5387
76a9e08e 5388 if (adapter->wol_en)
5389 be_setup_wol(adapter, true);
5390
d4360d6f 5391 be_intr_set(adapter, false);
eb7dd46c 5392 be_cancel_err_detection(adapter);
f67ef7ba 5393
87ac1a52 5394 be_cleanup(adapter);
5395
5396 pci_save_state(pdev);
5397 pci_disable_device(pdev);
5398 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5399 return 0;
5400}
5401
484d76fd 5402static int be_pci_resume(struct pci_dev *pdev)
6b7c5b94 5403{
6b7c5b94 5404 struct be_adapter *adapter = pci_get_drvdata(pdev);
484d76fd 5405 int status = 0;
5406
5407 status = pci_enable_device(pdev);
5408 if (status)
5409 return status;
5410
5411 pci_restore_state(pdev);
5412
484d76fd 5413 status = be_resume(adapter);
5414 if (status)
5415 return status;
5416
972f37b4 5417 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
eb7dd46c 5418
76a9e08e 5419 if (adapter->wol_en)
71d8d1b5 5420 be_setup_wol(adapter, false);
a4ca055f 5421
5422 return 0;
5423}
5424
5425/*
5426 * An FLR will stop BE from DMAing any data.
5427 */
5428static void be_shutdown(struct pci_dev *pdev)
5429{
5430 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 5431
5432 if (!adapter)
5433 return;
82456b03 5434
d114f99a 5435 be_roce_dev_shutdown(adapter);
0f4a6828 5436 cancel_delayed_work_sync(&adapter->work);
eb7dd46c 5437 be_cancel_err_detection(adapter);
a4ca055f 5438
2d5d4154 5439 netif_device_detach(adapter->netdev);
82456b03 5440
5441 be_cmd_reset_function(adapter);
5442
82456b03 5443 pci_disable_device(pdev);
5444}
5445
cf588477 5446static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
748b539a 5447 pci_channel_state_t state)
5448{
5449 struct be_adapter *adapter = pci_get_drvdata(pdev);
5450
5451 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5452
5453 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5454 be_set_error(adapter, BE_ERROR_EEH);
cf588477 5455
eb7dd46c 5456 be_cancel_err_detection(adapter);
cf588477 5457
87ac1a52 5458 be_cleanup(adapter);
cf588477 5459 }
5460
5461 if (state == pci_channel_io_perm_failure)
5462 return PCI_ERS_RESULT_DISCONNECT;
5463
5464 pci_disable_device(pdev);
5465
5466 /* The error could cause the FW to trigger a flash debug dump.
5467 * Resetting the card while flash dump is in progress
5468 * can cause it not to recover; wait for it to finish.
 5469 * Wait only for the first function, as the wait is needed only
 5470 * once per adapter.
eeb7fc7b 5471 */
5472 if (pdev->devfn == 0)
5473 ssleep(30);
5474
5475 return PCI_ERS_RESULT_NEED_RESET;
5476}
5477
5478static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5479{
5480 struct be_adapter *adapter = pci_get_drvdata(pdev);
5481 int status;
5482
5483 dev_info(&adapter->pdev->dev, "EEH reset\n");
5484
5485 status = pci_enable_device(pdev);
5486 if (status)
5487 return PCI_ERS_RESULT_DISCONNECT;
5488
5489 pci_set_master(pdev);
5490 pci_restore_state(pdev);
5491
5492 /* Check if card is ok and fw is ready */
5493 dev_info(&adapter->pdev->dev,
5494 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 5495 status = be_fw_wait_ready(adapter);
5496 if (status)
5497 return PCI_ERS_RESULT_DISCONNECT;
5498
d6b6d987 5499 pci_cleanup_aer_uncorrect_error_status(pdev);
954f6825 5500 be_clear_error(adapter, BE_CLEAR_ALL);
5501 return PCI_ERS_RESULT_RECOVERED;
5502}
5503
5504static void be_eeh_resume(struct pci_dev *pdev)
5505{
5506 int status = 0;
5507 struct be_adapter *adapter = pci_get_drvdata(pdev);
5508
5509 dev_info(&adapter->pdev->dev, "EEH resume\n");
5510
5511 pci_save_state(pdev);
5512
484d76fd 5513 status = be_resume(adapter);
5514 if (status)
5515 goto err;
5516
972f37b4 5517 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5518 return;
5519err:
5520 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5521}
5522
5523static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5524{
5525 struct be_adapter *adapter = pci_get_drvdata(pdev);
5526 u16 num_vf_qs;
5527 int status;
5528
5529 if (!num_vfs)
5530 be_vf_clear(adapter);
5531
5532 adapter->num_vfs = num_vfs;
5533
5534 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5535 dev_warn(&pdev->dev,
5536 "Cannot disable VFs while they are assigned\n");
5537 return -EBUSY;
5538 }
5539
 5540 /* When the HW is in an SR-IOV capable configuration, the PF-pool
 5541 * resources are distributed equally across the maximum number of VFs.
 5542 * The user may request that only a subset of the maximum VFs be enabled.
 5543 * Based on num_vfs, redistribute the resources across num_vfs so that
 5544 * each VF gets access to a larger share of resources.
 5545 * This facility is not available in BE3 FW; on Lancer, the FW itself
 5546 * performs this redistribution.
 5547 */
5548 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5549 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5550 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5551 adapter->num_vfs, num_vf_qs);
5552 if (status)
5553 dev_err(&pdev->dev,
5554 "Failed to optimize SR-IOV resources\n");
5555 }
5556
5557 status = be_get_resources(adapter);
5558 if (status)
5559 return be_cmd_status(status);
5560
5561 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5562 rtnl_lock();
5563 status = be_update_queues(adapter);
5564 rtnl_unlock();
5565 if (status)
5566 return be_cmd_status(status);
5567
5568 if (adapter->num_vfs)
5569 status = be_vf_setup(adapter);
5570
5571 if (!status)
5572 return adapter->num_vfs;
5573
5574 return 0;
5575}
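
The .sriov_configure hook implemented above is invoked by the PCI core when userspace writes to the device's sriov_numvfs sysfs attribute; this is the "sysfs method" the obsolete num_vfs parameter message points to. A hypothetical userspace sketch (the BDF in the path is an example, not from the source):

#include <stdio.h>

int main(void)
{
	/* Writing N makes the PCI core call be_pci_sriov_configure(pdev, N);
	 * writing 0 disables the VFs again.
	 */
	FILE *f = fopen("/sys/bus/pci/devices/0000:04:00.0/sriov_numvfs", "w");

	if (!f)
		return 1;
	fprintf(f, "4\n");
	fclose(f);
	return 0;
}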
5576
3646f0e5 5577static const struct pci_error_handlers be_eeh_handlers = {
5578 .error_detected = be_eeh_err_detected,
5579 .slot_reset = be_eeh_reset,
5580 .resume = be_eeh_resume,
5581};
5582
5583static struct pci_driver be_driver = {
5584 .name = DRV_NAME,
5585 .id_table = be_dev_ids,
5586 .probe = be_probe,
5587 .remove = be_remove,
5588 .suspend = be_suspend,
484d76fd 5589 .resume = be_pci_resume,
82456b03 5590 .shutdown = be_shutdown,
ace40aff 5591 .sriov_configure = be_pci_sriov_configure,
cf588477 5592 .err_handler = &be_eeh_handlers
5593};
5594
5595static int __init be_init_module(void)
5596{
5597 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5598 rx_frag_size != 2048) {
5599 printk(KERN_WARNING DRV_NAME
5600 " : Module param rx_frag_size must be 2048/4096/8192."
5601 " Using 2048\n");
5602 rx_frag_size = 2048;
5603 }
6b7c5b94 5604
5605 if (num_vfs > 0) {
 5606 pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
5607 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5608 }
5609
5610 return pci_register_driver(&be_driver);
5611}
5612module_init(be_init_module);
5613
5614static void __exit be_exit_module(void)
5615{
5616 pci_unregister_driver(&be_driver);
5617}
5618module_exit(be_exit_module);