/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
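
/* The EQ doorbell is a single 32-bit write that packs the ring id, the
 * re-arm and clear-interrupt bits, an event bit, the number of popped
 * entries and the delay-multiplier encoding. For example, re-arming qid 5
 * after popping 3 events sets (5 & DB_EQ_RING_ID_MASK),
 * 1 << DB_EQ_REARM_SHIFT and 3 << DB_EQ_NUM_POPPED_SHIFT in the same word.
 */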

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}
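
/* The GET_STATS response layout is versioned per chip family: v0 for BE2,
 * v1 for BE3 and v2 for later chips. The two helpers above hide that
 * versioning so each populate_*_stats() routine below can simply cast to
 * the layout matching the adapter.
 */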

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
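
/* Worked example of the wrap handling above: if the accumulator's low half
 * is 0xFFF0 and the 16-bit HW counter has wrapped around to 0x0005, then
 * val < lo(*acc) is true, so 65536 is added and the accumulated 32-bit
 * value advances by 0x15 (21) events instead of going backwards.
 */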

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
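
/* The per-queue packet/byte counters above are sampled inside a
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop so that the
 * 64-bit values are read consistently even on 32-bit hosts, where a plain
 * 64-bit load is not atomic.
 */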

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}
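
/* Example: an skb with linear data and two page frags needs
 * 1 (hdr) + 1 (linear data) + 2 (frags) = 4 WRBs.
 */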

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}
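
/* Example: if the stack hands down priority 5 but bit 5 is clear in
 * adapter->vlan_prio_bmap, the PCP bits are masked out and replaced with
 * adapter->recommended_prio while the VLAN id itself is preserved.
 */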

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}
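
/* The stop/wake watermarks above are deliberately far apart: the queue is
 * stopped while it still has room for one worst-case packet
 * (BE_MAX_TX_FRAG_COUNT WRBs) and is only woken once it drains below half
 * capacity, avoiding rapid stop/wake flapping under load.
 */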

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
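
/* When the dummy WRB is appended above (the doorbell on non-Lancer chips is
 * only rung for an even number of pending WRBs), the num_wrb field of the
 * last posted header is bumped to last_req_wrb_cnt + 1 as well, so the
 * completion accounting matches what was actually placed on the ring.
 */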

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)

static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
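
/* Sketch of the resulting flow in be_xmit() below: when this helper returns
 * true, the same skb is posted a second time with the OS2BMC (mgmt) bit set
 * so a copy also reaches the BMC, and skb_get() takes an extra reference
 * because the skb is freed once per posted copy on TX completion.
 */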

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
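
/* Example: once vlans_added exceeds be_max_vlans() (at most 64, per the
 * comment above), the function stops programming per-VLAN filters and falls
 * back to VLAN promiscuous mode; when enough VLANs are removed, a later
 * be_vid_config() call restores HW filtering and clears promiscuous mode.
 */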

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1606
64600ea5 1607static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1608 struct ifla_vf_info *vi)
64600ea5
AK
1609{
1610 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1611 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1612
11ac75ed 1613 if (!sriov_enabled(adapter))
64600ea5
AK
1614 return -EPERM;
1615
11ac75ed 1616 if (vf >= adapter->num_vfs)
64600ea5
AK
1617 return -EINVAL;
1618
1619 vi->vf = vf;
ed616689
SC
1620 vi->max_tx_rate = vf_cfg->tx_rate;
1621 vi->min_tx_rate = 0;
a60b3a13
AK
1622 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1623 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1624 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1625 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1626 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
64600ea5
AK
1627
1628 return 0;
1629}
1630
435452aa
VV
1631static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1632{
1633 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1634 u16 vids[BE_NUM_VLANS_SUPPORTED];
1635 int vf_if_id = vf_cfg->if_handle;
1636 int status;
1637
1638 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1639 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
435452aa
VV
1640 if (status)
1641 return status;
1642
 1643 /* Clear any pre-programmed VLAN filters on the VF when TVT is enabled */
1644 vids[0] = 0;
1645 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1646 if (!status)
1647 dev_info(&adapter->pdev->dev,
1648 "Cleared guest VLANs on VF%d", vf);
1649
1650 /* After TVT is enabled, disallow VFs to program VLAN filters */
1651 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1652 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1653 ~BE_PRIV_FILTMGMT, vf + 1);
1654 if (!status)
1655 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1656 }
1657 return 0;
1658}
1659
1660static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1661{
1662 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1663 struct device *dev = &adapter->pdev->dev;
1664 int status;
1665
1666 /* Reset Transparent VLAN Tagging. */
1667 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1668 vf_cfg->if_handle, 0, 0);
435452aa
VV
1669 if (status)
1670 return status;
1671
1672 /* Allow VFs to program VLAN filtering */
1673 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1674 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1675 BE_PRIV_FILTMGMT, vf + 1);
1676 if (!status) {
1677 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1678 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1679 }
1680 }
1681
1682 dev_info(dev,
1683 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1684 return 0;
1685}
1686
748b539a 1687static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1da87b7f
AK
1688{
1689 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1690 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1691 int status;
1da87b7f 1692
11ac75ed 1693 if (!sriov_enabled(adapter))
1da87b7f
AK
1694 return -EPERM;
1695
b9fc0e53 1696 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1da87b7f
AK
1697 return -EINVAL;
1698
b9fc0e53
AK
1699 if (vlan || qos) {
1700 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1701 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1702 } else {
435452aa 1703 status = be_clear_vf_tvt(adapter, vf);
1da87b7f
AK
1704 }
1705
abccf23e
KA
1706 if (status) {
1707 dev_err(&adapter->pdev->dev,
435452aa
VV
1708 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1709 status);
abccf23e
KA
1710 return be_cmd_status(status);
1711 }
1712
1713 vf_cfg->vlan_tag = vlan;
abccf23e 1714 return 0;
1da87b7f
AK
1715}
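/* Example (illustrative, assuming VLAN_PRIO_SHIFT == 13): for vlan = 100 and
 * qos = 3, the tag handed to be_set_vf_tvt() is 100 | (3 << 13) = 0x6064,
 * i.e. the VID in bits 11:0 and the priority (PCP) in bits 15:13.
 */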
1716
ed616689
SC
1717static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1718 int min_tx_rate, int max_tx_rate)
e1d18735
AK
1719{
1720 struct be_adapter *adapter = netdev_priv(netdev);
0f77ba73
RN
1721 struct device *dev = &adapter->pdev->dev;
1722 int percent_rate, status = 0;
1723 u16 link_speed = 0;
1724 u8 link_status;
e1d18735 1725
11ac75ed 1726 if (!sriov_enabled(adapter))
e1d18735
AK
1727 return -EPERM;
1728
94f434c2 1729 if (vf >= adapter->num_vfs)
e1d18735
AK
1730 return -EINVAL;
1731
ed616689
SC
1732 if (min_tx_rate)
1733 return -EINVAL;
1734
0f77ba73
RN
1735 if (!max_tx_rate)
1736 goto config_qos;
1737
1738 status = be_cmd_link_status_query(adapter, &link_speed,
1739 &link_status, 0);
1740 if (status)
1741 goto err;
1742
1743 if (!link_status) {
1744 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1745 status = -ENETDOWN;
0f77ba73
RN
1746 goto err;
1747 }
1748
1749 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1750 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1751 link_speed);
1752 status = -EINVAL;
1753 goto err;
1754 }
1755
1756 /* On Skyhawk the QOS setting must be done only as a % value */
1757 percent_rate = link_speed / 100;
1758 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1759 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1760 percent_rate);
1761 status = -EINVAL;
1762 goto err;
94f434c2 1763 }
e1d18735 1764
0f77ba73
RN
1765config_qos:
1766 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1767 if (status)
0f77ba73
RN
1768 goto err;
1769
1770 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1771 return 0;
1772
1773err:
1774 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1775 max_tx_rate, vf);
abccf23e 1776 return be_cmd_status(status);
e1d18735 1777}
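/* Example (illustrative): on a Skyhawk NIC with link_speed = 10000 Mbps,
 * percent_rate = 100, so a max_tx_rate of 2500 (a multiple of 100, i.e. 25%)
 * is accepted while 2550 is rejected with -EINVAL.
 */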
e2fb1afa 1778
bdce2ad7
SR
1779static int be_set_vf_link_state(struct net_device *netdev, int vf,
1780 int link_state)
1781{
1782 struct be_adapter *adapter = netdev_priv(netdev);
1783 int status;
1784
1785 if (!sriov_enabled(adapter))
1786 return -EPERM;
1787
1788 if (vf >= adapter->num_vfs)
1789 return -EINVAL;
1790
1791 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
abccf23e
KA
1792 if (status) {
1793 dev_err(&adapter->pdev->dev,
1794 "Link state change on VF %d failed: %#x\n", vf, status);
1795 return be_cmd_status(status);
1796 }
bdce2ad7 1797
abccf23e
KA
1798 adapter->vf_cfg[vf].plink_tracking = link_state;
1799
1800 return 0;
bdce2ad7 1801}
e1d18735 1802
e7bcbd7b
KA
1803static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1804{
1805 struct be_adapter *adapter = netdev_priv(netdev);
1806 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1807 u8 spoofchk;
1808 int status;
1809
1810 if (!sriov_enabled(adapter))
1811 return -EPERM;
1812
1813 if (vf >= adapter->num_vfs)
1814 return -EINVAL;
1815
1816 if (BEx_chip(adapter))
1817 return -EOPNOTSUPP;
1818
1819 if (enable == vf_cfg->spoofchk)
1820 return 0;
1821
1822 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1823
1824 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1825 0, spoofchk);
1826 if (status) {
1827 dev_err(&adapter->pdev->dev,
1828 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1829 return be_cmd_status(status);
1830 }
1831
1832 vf_cfg->spoofchk = enable;
1833 return 0;
1834}
1835
2632bafd
SP
1836static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1837 ulong now)
6b7c5b94 1838{
2632bafd
SP
1839 aic->rx_pkts_prev = rx_pkts;
1840 aic->tx_reqs_prev = tx_pkts;
1841 aic->jiffies = now;
1842}
ac124ff9 1843
20947770 1844static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1845{
20947770
PR
1846 struct be_adapter *adapter = eqo->adapter;
1847 int eqd, start;
2632bafd 1848 struct be_aic_obj *aic;
2632bafd
SP
1849 struct be_rx_obj *rxo;
1850 struct be_tx_obj *txo;
20947770 1851 u64 rx_pkts = 0, tx_pkts = 0;
2632bafd
SP
1852 ulong now;
1853 u32 pps, delta;
20947770 1854 int i;
10ef9ab4 1855
20947770
PR
1856 aic = &adapter->aic_obj[eqo->idx];
1857 if (!aic->enable) {
1858 if (aic->jiffies)
1859 aic->jiffies = 0;
1860 eqd = aic->et_eqd;
1861 return eqd;
1862 }
6b7c5b94 1863
20947770 1864 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1865 do {
57a7744e 1866 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1867 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1868 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1869 }
10ef9ab4 1870
20947770 1871 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1872 do {
57a7744e 1873 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1874 tx_pkts += txo->stats.tx_reqs;
57a7744e 1875 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1876 }
6b7c5b94 1877
20947770
PR
 1878 /* Skip if wrapped around or on first calculation */
1879 now = jiffies;
1880 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1881 rx_pkts < aic->rx_pkts_prev ||
1882 tx_pkts < aic->tx_reqs_prev) {
1883 be_aic_update(aic, rx_pkts, tx_pkts, now);
1884 return aic->prev_eqd;
1885 }
2632bafd 1886
20947770
PR
1887 delta = jiffies_to_msecs(now - aic->jiffies);
1888 if (delta == 0)
1889 return aic->prev_eqd;
10ef9ab4 1890
20947770
PR
1891 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1892 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1893 eqd = (pps / 15000) << 2;
2632bafd 1894
20947770
PR
1895 if (eqd < 8)
1896 eqd = 0;
1897 eqd = min_t(u32, eqd, aic->max_eqd);
1898 eqd = max_t(u32, eqd, aic->min_eqd);
1899
1900 be_aic_update(aic, rx_pkts, tx_pkts, now);
1901
1902 return eqd;
1903}
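/* Example (illustrative): if 200000 rx pkts and 100000 tx reqs were seen over
 * a delta of 1000 ms, pps = 200000 + 100000 = 300000 and
 * eqd = (300000 / 15000) << 2 = 80, which is then clamped to
 * [aic->min_eqd, aic->max_eqd]. Rates below ~30000 pps yield eqd < 8 and
 * disable the delay (eqd = 0).
 */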
1904
1905/* For Skyhawk-R only */
1906static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1907{
1908 struct be_adapter *adapter = eqo->adapter;
1909 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1910 ulong now = jiffies;
1911 int eqd;
1912 u32 mult_enc;
1913
1914 if (!aic->enable)
1915 return 0;
1916
1917 if (time_before_eq(now, aic->jiffies) ||
1918 jiffies_to_msecs(now - aic->jiffies) < 1)
1919 eqd = aic->prev_eqd;
1920 else
1921 eqd = be_get_new_eqd(eqo);
1922
1923 if (eqd > 100)
1924 mult_enc = R2I_DLY_ENC_1;
1925 else if (eqd > 60)
1926 mult_enc = R2I_DLY_ENC_2;
1927 else if (eqd > 20)
1928 mult_enc = R2I_DLY_ENC_3;
1929 else
1930 mult_enc = R2I_DLY_ENC_0;
1931
1932 aic->prev_eqd = eqd;
1933
1934 return mult_enc;
1935}
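/* The eqd-to-encoding mapping above, in table form (illustrative):
 *
 *	eqd		encoding
 *	> 100		R2I_DLY_ENC_1
 *	61 - 100	R2I_DLY_ENC_2
 *	21 - 60		R2I_DLY_ENC_3
 *	<= 20		R2I_DLY_ENC_0
 */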
1936
1937void be_eqd_update(struct be_adapter *adapter, bool force_update)
1938{
1939 struct be_set_eqd set_eqd[MAX_EVT_QS];
1940 struct be_aic_obj *aic;
1941 struct be_eq_obj *eqo;
1942 int i, num = 0, eqd;
1943
1944 for_all_evt_queues(adapter, eqo, i) {
1945 aic = &adapter->aic_obj[eqo->idx];
1946 eqd = be_get_new_eqd(eqo);
1947 if (force_update || eqd != aic->prev_eqd) {
2632bafd
SP
1948 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1949 set_eqd[num].eq_id = eqo->q.id;
1950 aic->prev_eqd = eqd;
1951 num++;
1952 }
ac124ff9 1953 }
2632bafd
SP
1954
1955 if (num)
1956 be_cmd_modify_eqd(adapter, set_eqd, num);
6b7c5b94
SP
1957}
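/* Example (illustrative): an eqd of 80 is converted to a delay_multiplier of
 * (80 * 65) / 100 = 52 before being sent to the FW via be_cmd_modify_eqd().
 */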
1958
3abcdeda 1959static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1960 struct be_rx_compl_info *rxcp)
4097f663 1961{
ac124ff9 1962 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1963
ab1594e9 1964 u64_stats_update_begin(&stats->sync);
3abcdeda 1965 stats->rx_compl++;
2e588f84 1966 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1967 stats->rx_pkts++;
8670f2a5
SB
1968 if (rxcp->tunneled)
1969 stats->rx_vxlan_offload_pkts++;
2e588f84 1970 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1971 stats->rx_mcast_pkts++;
2e588f84 1972 if (rxcp->err)
ac124ff9 1973 stats->rx_compl_err++;
ab1594e9 1974 u64_stats_update_end(&stats->sync);
4097f663
SP
1975}
1976
2e588f84 1977static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1978{
19fad86f 1979 /* L4 checksum is not reliable for non TCP/UDP packets.
c9c47142
SP
1980 * Also ignore ipcksm for ipv6 pkts
1981 */
2e588f84 1982 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1983 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
728a9972
AK
1984}
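/* Examples (illustrative): a TCP/IPv4 compl with l4_csum and ip_csum set and
 * err clear returns true, letting the stack skip checksum verification; for
 * IPv6 the ip_csum bit is ignored via the rxcp->ipv6 term; a non-TCP/UDP pkt
 * (e.g. ICMP) always returns false so the stack verifies the checksum itself.
 */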
1985
0b0ef1d0 1986static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1987{
10ef9ab4 1988 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1989 struct be_rx_page_info *rx_page_info;
3abcdeda 1990 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1991 u16 frag_idx = rxq->tail;
6b7c5b94 1992
3abcdeda 1993 rx_page_info = &rxo->page_info_tbl[frag_idx];
6b7c5b94
SP
1994 BUG_ON(!rx_page_info->page);
1995
e50287be 1996 if (rx_page_info->last_frag) {
2b7bcebf
IV
1997 dma_unmap_page(&adapter->pdev->dev,
1998 dma_unmap_addr(rx_page_info, bus),
1999 adapter->big_page_size, DMA_FROM_DEVICE);
e50287be
SP
2000 rx_page_info->last_frag = false;
2001 } else {
2002 dma_sync_single_for_cpu(&adapter->pdev->dev,
2003 dma_unmap_addr(rx_page_info, bus),
2004 rx_frag_size, DMA_FROM_DEVICE);
205859a2 2005 }
6b7c5b94 2006
0b0ef1d0 2007 queue_tail_inc(rxq);
6b7c5b94
SP
2008 atomic_dec(&rxq->used);
2009 return rx_page_info;
2010}
2011
 2012/* Throw away the data in the Rx completion */
10ef9ab4
SP
2013static void be_rx_compl_discard(struct be_rx_obj *rxo,
2014 struct be_rx_compl_info *rxcp)
6b7c5b94 2015{
6b7c5b94 2016 struct be_rx_page_info *page_info;
2e588f84 2017 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2018
e80d9da6 2019 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2020 page_info = get_rx_page_info(rxo);
e80d9da6
PR
2021 put_page(page_info->page);
2022 memset(page_info, 0, sizeof(*page_info));
6b7c5b94
SP
2023 }
2024}
2025
2026/*
2027 * skb_fill_rx_data forms a complete skb for an ether frame
2028 * indicated by rxcp.
2029 */
10ef9ab4
SP
2030static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2031 struct be_rx_compl_info *rxcp)
6b7c5b94 2032{
6b7c5b94 2033 struct be_rx_page_info *page_info;
2e588f84
SP
2034 u16 i, j;
2035 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2036 u8 *start;
6b7c5b94 2037
0b0ef1d0 2038 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2039 start = page_address(page_info->page) + page_info->page_offset;
2040 prefetch(start);
2041
2042 /* Copy data in the first descriptor of this completion */
2e588f84 2043 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2044
6b7c5b94
SP
2045 skb->len = curr_frag_len;
2046 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2047 memcpy(skb->data, start, curr_frag_len);
6b7c5b94
SP
2048 /* Complete packet has now been moved to data */
2049 put_page(page_info->page);
2050 skb->data_len = 0;
2051 skb->tail += curr_frag_len;
2052 } else {
ac1ae5f3
ED
2053 hdr_len = ETH_HLEN;
2054 memcpy(skb->data, start, hdr_len);
6b7c5b94 2055 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2056 skb_frag_set_page(skb, 0, page_info->page);
6b7c5b94
SP
2057 skb_shinfo(skb)->frags[0].page_offset =
2058 page_info->page_offset + hdr_len;
748b539a
SP
2059 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2060 curr_frag_len - hdr_len);
6b7c5b94 2061 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2062 skb->truesize += rx_frag_size;
6b7c5b94
SP
2063 skb->tail += hdr_len;
2064 }
205859a2 2065 page_info->page = NULL;
6b7c5b94 2066
2e588f84
SP
2067 if (rxcp->pkt_size <= rx_frag_size) {
2068 BUG_ON(rxcp->num_rcvd != 1);
2069 return;
6b7c5b94
SP
2070 }
2071
2072 /* More frags present for this completion */
2e588f84
SP
2073 remaining = rxcp->pkt_size - curr_frag_len;
2074 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2075 page_info = get_rx_page_info(rxo);
2e588f84 2076 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2077
bd46cb6c
AK
2078 /* Coalesce all frags from the same physical page in one slot */
2079 if (page_info->page_offset == 0) {
2080 /* Fresh page */
2081 j++;
b061b39e 2082 skb_frag_set_page(skb, j, page_info->page);
bd46cb6c
AK
2083 skb_shinfo(skb)->frags[j].page_offset =
2084 page_info->page_offset;
9e903e08 2085 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2086 skb_shinfo(skb)->nr_frags++;
2087 } else {
2088 put_page(page_info->page);
2089 }
2090
9e903e08 2091 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
6b7c5b94
SP
2092 skb->len += curr_frag_len;
2093 skb->data_len += curr_frag_len;
bdb28a97 2094 skb->truesize += rx_frag_size;
2e588f84 2095 remaining -= curr_frag_len;
205859a2 2096 page_info->page = NULL;
6b7c5b94 2097 }
bd46cb6c 2098 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94
SP
2099}
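/* Example (illustrative, assuming rx_frag_size = 2048): a 6000-byte frame
 * arrives as num_rcvd = 3 frags (2048 + 2048 + 1904). ETH_HLEN bytes are
 * copied into the skb linear area, the rest of frag 0 becomes frags[0], and
 * the remaining two frags land in frags[1]/frags[2] -- or fewer slots when
 * consecutive frags share the same physical page and get coalesced.
 */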
2100
5be93b9a 2101/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2102static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2103 struct be_rx_compl_info *rxcp)
6b7c5b94 2104{
10ef9ab4 2105 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2106 struct net_device *netdev = adapter->netdev;
6b7c5b94 2107 struct sk_buff *skb;
89420424 2108
bb349bb4 2109 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2110 if (unlikely(!skb)) {
ac124ff9 2111 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2112 be_rx_compl_discard(rxo, rxcp);
6b7c5b94
SP
2113 return;
2114 }
2115
10ef9ab4 2116 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2117
6332c8d3 2118 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2119 skb->ip_summed = CHECKSUM_UNNECESSARY;
c6ce2f4b
SK
2120 else
2121 skb_checksum_none_assert(skb);
6b7c5b94 2122
6332c8d3 2123 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2124 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2125 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2126 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2127
b6c0e89d 2128 skb->csum_level = rxcp->tunneled;
6384a4d0 2129 skb_mark_napi_id(skb, napi);
6b7c5b94 2130
343e43c0 2131 if (rxcp->vlanf)
86a9bad3 2132 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9
AK
2133
2134 netif_receive_skb(skb);
6b7c5b94
SP
2135}
2136
5be93b9a 2137/* Process the RX completion indicated by rxcp when GRO is enabled */
4188e7df
JH
2138static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2139 struct napi_struct *napi,
2140 struct be_rx_compl_info *rxcp)
6b7c5b94 2141{
10ef9ab4 2142 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2143 struct be_rx_page_info *page_info;
5be93b9a 2144 struct sk_buff *skb = NULL;
2e588f84
SP
2145 u16 remaining, curr_frag_len;
2146 u16 i, j;
3968fa1e 2147
10ef9ab4 2148 skb = napi_get_frags(napi);
5be93b9a 2149 if (!skb) {
10ef9ab4 2150 be_rx_compl_discard(rxo, rxcp);
5be93b9a
AK
2151 return;
2152 }
2153
2e588f84
SP
2154 remaining = rxcp->pkt_size;
2155 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2156 page_info = get_rx_page_info(rxo);
6b7c5b94
SP
2157
2158 curr_frag_len = min(remaining, rx_frag_size);
2159
bd46cb6c
AK
2160 /* Coalesce all frags from the same physical page in one slot */
2161 if (i == 0 || page_info->page_offset == 0) {
2162 /* First frag or Fresh page */
2163 j++;
b061b39e 2164 skb_frag_set_page(skb, j, page_info->page);
5be93b9a
AK
2165 skb_shinfo(skb)->frags[j].page_offset =
2166 page_info->page_offset;
9e903e08 2167 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
bd46cb6c
AK
2168 } else {
2169 put_page(page_info->page);
2170 }
9e903e08 2171 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2172 skb->truesize += rx_frag_size;
bd46cb6c 2173 remaining -= curr_frag_len;
6b7c5b94
SP
2174 memset(page_info, 0, sizeof(*page_info));
2175 }
bd46cb6c 2176 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2177
5be93b9a 2178 skb_shinfo(skb)->nr_frags = j + 1;
2e588f84
SP
2179 skb->len = rxcp->pkt_size;
2180 skb->data_len = rxcp->pkt_size;
5be93b9a 2181 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2182 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2183 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2184 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2185
b6c0e89d 2186 skb->csum_level = rxcp->tunneled;
6384a4d0 2187 skb_mark_napi_id(skb, napi);
5be93b9a 2188
343e43c0 2189 if (rxcp->vlanf)
86a9bad3 2190 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2191
10ef9ab4 2192 napi_gro_frags(napi);
2e588f84
SP
2193}
2194
10ef9ab4
SP
2195static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2196 struct be_rx_compl_info *rxcp)
2e588f84 2197{
c3c18bc1
SP
2198 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2199 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2200 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2201 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2202 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2203 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2204 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2205 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2206 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2207 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2208 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2209 if (rxcp->vlanf) {
c3c18bc1
SP
2210 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2211 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2212 }
c3c18bc1 2213 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2214 rxcp->tunneled =
c3c18bc1 2215 GET_RX_COMPL_V1_BITS(tunneled, compl);
2e588f84
SP
2216}
2217
10ef9ab4
SP
2218static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2219 struct be_rx_compl_info *rxcp)
2e588f84 2220{
c3c18bc1
SP
2221 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2222 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2223 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2224 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2225 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2226 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2227 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2228 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2229 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2230 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2231 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2232 if (rxcp->vlanf) {
c3c18bc1
SP
2233 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2234 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2235 }
c3c18bc1
SP
2236 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2237 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2e588f84
SP
2238}
2239
2240static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2241{
2242 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2243 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2244 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2245
2e588f84
SP
 2246 /* For checking the valid bit, it is OK to use either definition, as the
 2247	 * valid bit is at the same position in both v0 and v1 Rx compls */
2248 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2249 return NULL;
6b7c5b94 2250
2e588f84
SP
2251 rmb();
2252 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2253
2e588f84 2254 if (adapter->be3_native)
10ef9ab4 2255 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2256 else
10ef9ab4 2257 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2258
e38b1706
SK
2259 if (rxcp->ip_frag)
2260 rxcp->l4_csum = 0;
2261
15d72184 2262 if (rxcp->vlanf) {
f93f160b
VV
2263 /* In QNQ modes, if qnq bit is not set, then the packet was
2264 * tagged only with the transparent outer vlan-tag and must
2265 * not be treated as a vlan packet by host
2266 */
2267 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2268 rxcp->vlanf = 0;
6b7c5b94 2269
15d72184 2270 if (!lancer_chip(adapter))
3c709f8f 2271 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2272
939cf306 2273 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2274 !test_bit(rxcp->vlan_tag, adapter->vids))
15d72184
SP
2275 rxcp->vlanf = 0;
2276 }
2e588f84
SP
2277
 2278	/* As the compl has been parsed, reset it; we won't touch it again */
2279 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2280
3abcdeda 2281 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
2282 return rxcp;
2283}
2284
1829b086 2285static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2286{
6b7c5b94 2287 u32 order = get_order(size);
1829b086 2288
6b7c5b94 2289 if (order > 0)
1829b086
ED
2290 gfp |= __GFP_COMP;
2291 return alloc_pages(gfp, order);
6b7c5b94
SP
2292}
2293
2294/*
 2295 * Allocate a page, split it into fragments of size rx_frag_size and post as
2296 * receive buffers to BE
2297 */
c30d7266 2298static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2299{
3abcdeda 2300 struct be_adapter *adapter = rxo->adapter;
26d92f92 2301 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2302 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2303 struct page *pagep = NULL;
ba42fad0 2304 struct device *dev = &adapter->pdev->dev;
6b7c5b94
SP
2305 struct be_eth_rx_d *rxd;
2306 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2307 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2308
3abcdeda 2309 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2310 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2311 if (!pagep) {
1829b086 2312 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2313 if (unlikely(!pagep)) {
ac124ff9 2314 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
2315 break;
2316 }
ba42fad0
IV
2317 page_dmaaddr = dma_map_page(dev, pagep, 0,
2318 adapter->big_page_size,
2b7bcebf 2319 DMA_FROM_DEVICE);
ba42fad0
IV
2320 if (dma_mapping_error(dev, page_dmaaddr)) {
2321 put_page(pagep);
2322 pagep = NULL;
d3de1540 2323 adapter->drv_stats.dma_map_errors++;
ba42fad0
IV
2324 break;
2325 }
e50287be 2326 page_offset = 0;
6b7c5b94
SP
2327 } else {
2328 get_page(pagep);
e50287be 2329 page_offset += rx_frag_size;
6b7c5b94 2330 }
e50287be 2331 page_info->page_offset = page_offset;
6b7c5b94 2332 page_info->page = pagep;
6b7c5b94
SP
2333
2334 rxd = queue_head_node(rxq);
e50287be 2335 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
6b7c5b94
SP
2336 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2337 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
2338
2339 /* Any space left in the current big page for another frag? */
2340 if ((page_offset + rx_frag_size + rx_frag_size) >
2341 adapter->big_page_size) {
2342 pagep = NULL;
e50287be
SP
2343 page_info->last_frag = true;
2344 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2345 } else {
2346 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2347 }
26d92f92
SP
2348
2349 prev_page_info = page_info;
2350 queue_head_inc(rxq);
10ef9ab4 2351 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2352 }
e50287be
SP
2353
2354 /* Mark the last frag of a page when we break out of the above loop
2355 * with no more slots available in the RXQ
2356 */
2357 if (pagep) {
2358 prev_page_info->last_frag = true;
2359 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2360 }
6b7c5b94
SP
2361
2362 if (posted) {
6b7c5b94 2363 atomic_add(posted, &rxq->used);
6384a4d0
SP
2364 if (rxo->rx_post_starved)
2365 rxo->rx_post_starved = false;
c30d7266 2366 do {
69304cc9 2367 notify = min(MAX_NUM_POST_ERX_DB, posted);
c30d7266
AK
2368 be_rxq_notify(adapter, rxq->id, notify);
2369 posted -= notify;
2370 } while (posted);
ea1dae11
SP
2371 } else if (atomic_read(&rxq->used) == 0) {
2372 /* Let be_worker replenish when memory is available */
3abcdeda 2373 rxo->rx_post_starved = true;
6b7c5b94 2374 }
6b7c5b94
SP
2375}
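/* Example (illustrative, assuming rx_frag_size = 2048 and PAGE_SIZE = 4096):
 * big_page_size = (1 << get_order(2048)) * 4096 = 4096, so each allocated
 * page is carved into two 2048-byte frags; the second frag fails the
 * "space left" check (2048 + 2*2048 > 4096) and is marked last_frag, and the
 * whole page is unmapped only when that last frag is consumed.
 */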
2376
152ffe5b 2377static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2378{
152ffe5b
SB
2379 struct be_queue_info *tx_cq = &txo->cq;
2380 struct be_tx_compl_info *txcp = &txo->txcp;
2381 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2382
152ffe5b 2383 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
6b7c5b94
SP
2384 return NULL;
2385
152ffe5b 2386 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2387 rmb();
152ffe5b 2388 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2389
152ffe5b
SB
2390 txcp->status = GET_TX_COMPL_BITS(status, compl);
2391 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2392
152ffe5b 2393 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
6b7c5b94
SP
2394 queue_tail_inc(tx_cq);
2395 return txcp;
2396}
2397
3c8def97 2398static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2399 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2400{
5f07b3c5 2401 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2402 struct be_queue_info *txq = &txo->q;
5f07b3c5
SP
2403 u16 frag_index, num_wrbs = 0;
2404 struct sk_buff *skb = NULL;
2405 bool unmap_skb_hdr = false;
a73b796e 2406 struct be_eth_wrb *wrb;
6b7c5b94 2407
ec43b1a6 2408 do {
5f07b3c5
SP
2409 if (sent_skbs[txq->tail]) {
2410 /* Free skb from prev req */
2411 if (skb)
2412 dev_consume_skb_any(skb);
2413 skb = sent_skbs[txq->tail];
2414 sent_skbs[txq->tail] = NULL;
2415 queue_tail_inc(txq); /* skip hdr wrb */
2416 num_wrbs++;
2417 unmap_skb_hdr = true;
2418 }
a73b796e 2419 wrb = queue_tail_node(txq);
5f07b3c5 2420 frag_index = txq->tail;
2b7bcebf 2421 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2422 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2423 unmap_skb_hdr = false;
6b7c5b94 2424 queue_tail_inc(txq);
5f07b3c5
SP
2425 num_wrbs++;
2426 } while (frag_index != last_index);
2427 dev_consume_skb_any(skb);
6b7c5b94 2428
4d586b82 2429 return num_wrbs;
6b7c5b94
SP
2430}
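/* Example (illustrative): a skb posted as one hdr wrb plus three frag wrbs at
 * indices 10..13 completes with txcp->end_index = 13; the walk skips the hdr
 * wrb, unmaps the frag wrbs and returns num_wrbs = 4, which the caller
 * subtracts from txq->used.
 */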
2431
10ef9ab4
SP
2432/* Return the number of events in the event queue */
2433static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2434{
10ef9ab4
SP
2435 struct be_eq_entry *eqe;
2436 int num = 0;
859b1e4e 2437
10ef9ab4
SP
2438 do {
2439 eqe = queue_tail_node(&eqo->q);
2440 if (eqe->evt == 0)
2441 break;
859b1e4e 2442
10ef9ab4
SP
2443 rmb();
2444 eqe->evt = 0;
2445 num++;
2446 queue_tail_inc(&eqo->q);
2447 } while (true);
2448
2449 return num;
859b1e4e
SP
2450}
2451
10ef9ab4
SP
 2452/* Leaves the EQ in disarmed state */
2453static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2454{
10ef9ab4 2455 int num = events_get(eqo);
859b1e4e 2456
20947770 2457 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
859b1e4e
SP
2458}
2459
99b44304
KA
2460/* Free posted rx buffers that were not used */
2461static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2462{
3abcdeda 2463 struct be_queue_info *rxq = &rxo->q;
99b44304
KA
2464 struct be_rx_page_info *page_info;
2465
2466 while (atomic_read(&rxq->used) > 0) {
2467 page_info = get_rx_page_info(rxo);
2468 put_page(page_info->page);
2469 memset(page_info, 0, sizeof(*page_info));
2470 }
2471 BUG_ON(atomic_read(&rxq->used));
2472 rxq->tail = 0;
2473 rxq->head = 0;
2474}
2475
2476static void be_rx_cq_clean(struct be_rx_obj *rxo)
2477{
3abcdeda 2478 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2479 struct be_rx_compl_info *rxcp;
d23e946c
SP
2480 struct be_adapter *adapter = rxo->adapter;
2481 int flush_wait = 0;
6b7c5b94 2482
d23e946c
SP
2483 /* Consume pending rx completions.
2484 * Wait for the flush completion (identified by zero num_rcvd)
2485 * to arrive. Notify CQ even when there are no more CQ entries
2486 * for HW to flush partially coalesced CQ entries.
2487 * In Lancer, there is no need to wait for flush compl.
2488 */
2489 for (;;) {
2490 rxcp = be_rx_compl_get(rxo);
ddf1169f 2491 if (!rxcp) {
d23e946c
SP
2492 if (lancer_chip(adapter))
2493 break;
2494
954f6825
VD
2495 if (flush_wait++ > 50 ||
2496 be_check_error(adapter,
2497 BE_ERROR_HW)) {
d23e946c
SP
2498 dev_warn(&adapter->pdev->dev,
2499 "did not receive flush compl\n");
2500 break;
2501 }
2502 be_cq_notify(adapter, rx_cq->id, true, 0);
2503 mdelay(1);
2504 } else {
2505 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2506 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
2507 if (rxcp->num_rcvd == 0)
2508 break;
2509 }
6b7c5b94
SP
2510 }
2511
d23e946c
SP
2512 /* After cleanup, leave the CQ in unarmed state */
2513 be_cq_notify(adapter, rx_cq->id, false, 0);
6b7c5b94
SP
2514}
2515
0ae57bb3 2516static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2517{
5f07b3c5
SP
2518 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2519 struct device *dev = &adapter->pdev->dev;
152ffe5b 2520 struct be_tx_compl_info *txcp;
0ae57bb3 2521 struct be_queue_info *txq;
152ffe5b 2522 struct be_tx_obj *txo;
0ae57bb3 2523 int i, pending_txqs;
a8e9179a 2524
1a3d0717 2525 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2526 do {
0ae57bb3
SP
2527 pending_txqs = adapter->num_tx_qs;
2528
2529 for_all_tx_queues(adapter, txo, i) {
1a3d0717
VV
2530 cmpl = 0;
2531 num_wrbs = 0;
0ae57bb3 2532 txq = &txo->q;
152ffe5b
SB
2533 while ((txcp = be_tx_compl_get(txo))) {
2534 num_wrbs +=
2535 be_tx_compl_process(adapter, txo,
2536 txcp->end_index);
0ae57bb3
SP
2537 cmpl++;
2538 }
2539 if (cmpl) {
2540 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2541 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2542 timeo = 0;
0ae57bb3 2543 }
cf5671e6 2544 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2545 pending_txqs--;
a8e9179a
SP
2546 }
2547
954f6825
VD
2548 if (pending_txqs == 0 || ++timeo > 10 ||
2549 be_check_error(adapter, BE_ERROR_HW))
a8e9179a
SP
2550 break;
2551
2552 mdelay(1);
2553 } while (true);
2554
5f07b3c5 2555 /* Free enqueued TX that was never notified to HW */
0ae57bb3
SP
2556 for_all_tx_queues(adapter, txo, i) {
2557 txq = &txo->q;
0ae57bb3 2558
5f07b3c5
SP
2559 if (atomic_read(&txq->used)) {
2560 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2561 i, atomic_read(&txq->used));
2562 notified_idx = txq->tail;
0ae57bb3 2563 end_idx = txq->tail;
5f07b3c5
SP
2564 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2565 txq->len);
2566 /* Use the tx-compl process logic to handle requests
2567 * that were not sent to the HW.
2568 */
0ae57bb3
SP
2569 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2570 atomic_sub(num_wrbs, &txq->used);
5f07b3c5
SP
2571 BUG_ON(atomic_read(&txq->used));
2572 txo->pend_wrb_cnt = 0;
2573 /* Since hw was never notified of these requests,
2574 * reset TXQ indices
2575 */
2576 txq->head = notified_idx;
2577 txq->tail = notified_idx;
0ae57bb3 2578 }
b03388d6 2579 }
6b7c5b94
SP
2580}
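/* Example (illustrative): with 5 un-notified wrbs and txq->tail = 10,
 * notified_idx = 10 and end_idx advances by used - 1 to 14; the normal
 * completion path then frees wrbs 10..14 and head/tail are wound back to 10,
 * as if the requests had never been posted.
 */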
2581
10ef9ab4
SP
2582static void be_evt_queues_destroy(struct be_adapter *adapter)
2583{
2584 struct be_eq_obj *eqo;
2585 int i;
2586
2587 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
2588 if (eqo->q.created) {
2589 be_eq_clean(eqo);
10ef9ab4 2590 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2591 napi_hash_del(&eqo->napi);
68d7bdcb 2592 netif_napi_del(&eqo->napi);
649886a3 2593 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2594 }
10ef9ab4
SP
2595 be_queue_free(adapter, &eqo->q);
2596 }
2597}
2598
2599static int be_evt_queues_create(struct be_adapter *adapter)
2600{
2601 struct be_queue_info *eq;
2602 struct be_eq_obj *eqo;
2632bafd 2603 struct be_aic_obj *aic;
10ef9ab4
SP
2604 int i, rc;
2605
92bf14ab
SP
2606 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2607 adapter->cfg_num_qs);
10ef9ab4
SP
2608
2609 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2610 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2611
2632bafd 2612 aic = &adapter->aic_obj[i];
10ef9ab4 2613 eqo->adapter = adapter;
10ef9ab4 2614 eqo->idx = i;
2632bafd
SP
2615 aic->max_eqd = BE_MAX_EQD;
2616 aic->enable = true;
10ef9ab4
SP
2617
2618 eq = &eqo->q;
2619 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2620 sizeof(struct be_eq_entry));
10ef9ab4
SP
2621 if (rc)
2622 return rc;
2623
f2f781a7 2624 rc = be_cmd_eq_create(adapter, eqo);
10ef9ab4
SP
2625 if (rc)
2626 return rc;
649886a3
KA
2627
2628 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2629 return -ENOMEM;
2630 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2631 eqo->affinity_mask);
2632 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2633 BE_NAPI_WEIGHT);
2634 napi_hash_add(&eqo->napi);
10ef9ab4 2635 }
1cfafab9 2636 return 0;
10ef9ab4
SP
2637}
2638
5fb379ee
SP
2639static void be_mcc_queues_destroy(struct be_adapter *adapter)
2640{
2641 struct be_queue_info *q;
5fb379ee 2642
8788fdc2 2643 q = &adapter->mcc_obj.q;
5fb379ee 2644 if (q->created)
8788fdc2 2645 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
2646 be_queue_free(adapter, q);
2647
8788fdc2 2648 q = &adapter->mcc_obj.cq;
5fb379ee 2649 if (q->created)
8788fdc2 2650 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
2651 be_queue_free(adapter, q);
2652}
2653
2654/* Must be called only after TX qs are created as MCC shares TX EQ */
2655static int be_mcc_queues_create(struct be_adapter *adapter)
2656{
2657 struct be_queue_info *q, *cq;
5fb379ee 2658
8788fdc2 2659 cq = &adapter->mcc_obj.cq;
5fb379ee 2660 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2661 sizeof(struct be_mcc_compl)))
5fb379ee
SP
2662 goto err;
2663
10ef9ab4
SP
2664 /* Use the default EQ for MCC completions */
2665 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2666 goto mcc_cq_free;
2667
8788fdc2 2668 q = &adapter->mcc_obj.q;
5fb379ee
SP
2669 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2670 goto mcc_cq_destroy;
2671
8788fdc2 2672 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2673 goto mcc_q_free;
2674
2675 return 0;
2676
2677mcc_q_free:
2678 be_queue_free(adapter, q);
2679mcc_cq_destroy:
8788fdc2 2680 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2681mcc_cq_free:
2682 be_queue_free(adapter, cq);
2683err:
2684 return -1;
2685}
2686
6b7c5b94
SP
2687static void be_tx_queues_destroy(struct be_adapter *adapter)
2688{
2689 struct be_queue_info *q;
3c8def97
SP
2690 struct be_tx_obj *txo;
2691 u8 i;
6b7c5b94 2692
3c8def97
SP
2693 for_all_tx_queues(adapter, txo, i) {
2694 q = &txo->q;
2695 if (q->created)
2696 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2697 be_queue_free(adapter, q);
6b7c5b94 2698
3c8def97
SP
2699 q = &txo->cq;
2700 if (q->created)
2701 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2702 be_queue_free(adapter, q);
2703 }
6b7c5b94
SP
2704}
2705
7707133c 2706static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2707{
73f394e6 2708 struct be_queue_info *cq;
3c8def97 2709 struct be_tx_obj *txo;
73f394e6 2710 struct be_eq_obj *eqo;
92bf14ab 2711 int status, i;
6b7c5b94 2712
92bf14ab 2713 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2714
10ef9ab4
SP
2715 for_all_tx_queues(adapter, txo, i) {
2716 cq = &txo->cq;
2717 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2718 sizeof(struct be_eth_tx_compl));
2719 if (status)
2720 return status;
3c8def97 2721
827da44c
JS
2722 u64_stats_init(&txo->stats.sync);
2723 u64_stats_init(&txo->stats.sync_compl);
2724
10ef9ab4
SP
2725 /* If num_evt_qs is less than num_tx_qs, then more than
 2726	 * one txq shares an eq
2727 */
73f394e6
SP
2728 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2729 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
10ef9ab4
SP
2730 if (status)
2731 return status;
6b7c5b94 2732
10ef9ab4
SP
2733 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2734 sizeof(struct be_eth_wrb));
2735 if (status)
2736 return status;
6b7c5b94 2737
94d73aaa 2738 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2739 if (status)
2740 return status;
73f394e6
SP
2741
2742 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2743 eqo->idx);
3c8def97 2744 }
6b7c5b94 2745
d379142b
SP
2746 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2747 adapter->num_tx_qs);
10ef9ab4 2748 return 0;
6b7c5b94
SP
2749}
2750
10ef9ab4 2751static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2752{
2753 struct be_queue_info *q;
3abcdeda
SP
2754 struct be_rx_obj *rxo;
2755 int i;
2756
2757 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2758 q = &rxo->cq;
2759 if (q->created)
2760 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2761 be_queue_free(adapter, q);
ac6a0c4a
SP
2762 }
2763}
2764
10ef9ab4 2765static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2766{
10ef9ab4 2767 struct be_queue_info *eq, *cq;
3abcdeda
SP
2768 struct be_rx_obj *rxo;
2769 int rc, i;
6b7c5b94 2770
92bf14ab 2771 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2772 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2773
71bb8bd0
VV
 2774	/* We'll use RSS only if at least 2 RSS rings are supported. */
2775 if (adapter->num_rss_qs <= 1)
2776 adapter->num_rss_qs = 0;
2777
2778 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2779
2780 /* When the interface is not capable of RSS rings (and there is no
2781 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2782 */
71bb8bd0
VV
2783 if (adapter->num_rx_qs == 0)
2784 adapter->num_rx_qs = 1;
92bf14ab 2785
6b7c5b94 2786 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2787 for_all_rx_queues(adapter, rxo, i) {
2788 rxo->adapter = adapter;
3abcdeda
SP
2789 cq = &rxo->cq;
2790 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2791 sizeof(struct be_eth_rx_compl));
3abcdeda 2792 if (rc)
10ef9ab4 2793 return rc;
3abcdeda 2794
827da44c 2795 u64_stats_init(&rxo->stats.sync);
10ef9ab4
SP
2796 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2797 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2798 if (rc)
10ef9ab4 2799 return rc;
3abcdeda 2800 }
6b7c5b94 2801
d379142b 2802 dev_info(&adapter->pdev->dev,
71bb8bd0 2803 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2804 return 0;
b628bde2
SP
2805}
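/* Example (illustrative): with 4 EQs, num_rss_qs = 4 and, if a default
 * (non-RSS) RXQ is needed, num_rx_qs = 5. With a single EQ, RSS is unusable
 * (num_rss_qs <= 1 is forced to 0) and the driver falls back to one plain
 * RXQ.
 */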
2806
6b7c5b94
SP
2807static irqreturn_t be_intx(int irq, void *dev)
2808{
e49cc34f
SP
2809 struct be_eq_obj *eqo = dev;
2810 struct be_adapter *adapter = eqo->adapter;
2811 int num_evts = 0;
6b7c5b94 2812
d0b9cec3
SP
2813 /* IRQ is not expected when NAPI is scheduled as the EQ
2814 * will not be armed.
2815 * But, this can happen on Lancer INTx where it takes
2816 * a while to de-assert INTx or in BE2 where occasionaly
2817 * an interrupt may be raised even when EQ is unarmed.
2818 * If NAPI is already scheduled, then counting & notifying
2819 * events will orphan them.
e49cc34f 2820 */
d0b9cec3 2821 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2822 num_evts = events_get(eqo);
d0b9cec3
SP
2823 __napi_schedule(&eqo->napi);
2824 if (num_evts)
2825 eqo->spurious_intr = 0;
2826 }
20947770 2827 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2828
d0b9cec3
SP
2829 /* Return IRQ_HANDLED only for the the first spurious intr
2830 * after a valid intr to stop the kernel from branding
2831 * this irq as a bad one!
e49cc34f 2832 */
d0b9cec3
SP
2833 if (num_evts || eqo->spurious_intr++ == 0)
2834 return IRQ_HANDLED;
2835 else
2836 return IRQ_NONE;
6b7c5b94
SP
2837}
2838
10ef9ab4 2839static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2840{
10ef9ab4 2841 struct be_eq_obj *eqo = dev;
6b7c5b94 2842
20947770 2843 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2844 napi_schedule(&eqo->napi);
6b7c5b94
SP
2845 return IRQ_HANDLED;
2846}
2847
2e588f84 2848static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2849{
e38b1706 2850 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2851}
2852
10ef9ab4 2853static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2854 int budget, int polling)
6b7c5b94 2855{
3abcdeda
SP
2856 struct be_adapter *adapter = rxo->adapter;
2857 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2858 struct be_rx_compl_info *rxcp;
6b7c5b94 2859 u32 work_done;
c30d7266 2860 u32 frags_consumed = 0;
6b7c5b94
SP
2861
2862 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2863 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2864 if (!rxcp)
2865 break;
2866
12004ae9
SP
2867 /* Is it a flush compl that has no data */
2868 if (unlikely(rxcp->num_rcvd == 0))
2869 goto loop_continue;
2870
 2871		/* Discard compl with partial DMA on Lancer B0 */
2872 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2873 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2874 goto loop_continue;
2875 }
2876
2877 /* On BE drop pkts that arrive due to imperfect filtering in
 2878		 * promiscuous mode on some SKUs
2879 */
2880 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2881 !lancer_chip(adapter))) {
10ef9ab4 2882 be_rx_compl_discard(rxo, rxcp);
12004ae9 2883 goto loop_continue;
64642811 2884 }
009dd872 2885
6384a4d0
SP
2886 /* Don't do gro when we're busy_polling */
2887 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2888 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2889 else
6384a4d0
SP
2890 be_rx_compl_process(rxo, napi, rxcp);
2891
12004ae9 2892loop_continue:
c30d7266 2893 frags_consumed += rxcp->num_rcvd;
2e588f84 2894 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2895 }
2896
10ef9ab4
SP
2897 if (work_done) {
2898 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2899
6384a4d0
SP
2900 /* When an rx-obj gets into post_starved state, just
2901 * let be_worker do the posting.
2902 */
2903 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2904 !rxo->rx_post_starved)
c30d7266
AK
2905 be_post_rx_frags(rxo, GFP_ATOMIC,
2906 max_t(u32, MAX_RX_POST,
2907 frags_consumed));
6b7c5b94 2908 }
10ef9ab4 2909
6b7c5b94
SP
2910 return work_done;
2911}
2912
152ffe5b 2913static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2914{
2915 switch (status) {
2916 case BE_TX_COMP_HDR_PARSE_ERR:
2917 tx_stats(txo)->tx_hdr_parse_err++;
2918 break;
2919 case BE_TX_COMP_NDMA_ERR:
2920 tx_stats(txo)->tx_dma_err++;
2921 break;
2922 case BE_TX_COMP_ACL_ERR:
2923 tx_stats(txo)->tx_spoof_check_err++;
2924 break;
2925 }
2926}
2927
152ffe5b 2928static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
512bb8a2
KA
2929{
2930 switch (status) {
2931 case LANCER_TX_COMP_LSO_ERR:
2932 tx_stats(txo)->tx_tso_err++;
2933 break;
2934 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2935 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2936 tx_stats(txo)->tx_spoof_check_err++;
2937 break;
2938 case LANCER_TX_COMP_QINQ_ERR:
2939 tx_stats(txo)->tx_qinq_err++;
2940 break;
2941 case LANCER_TX_COMP_PARITY_ERR:
2942 tx_stats(txo)->tx_internal_parity_err++;
2943 break;
2944 case LANCER_TX_COMP_DMA_ERR:
2945 tx_stats(txo)->tx_dma_err++;
2946 break;
2947 }
2948}
2949
c8f64615
SP
2950static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2951 int idx)
6b7c5b94 2952{
c8f64615 2953 int num_wrbs = 0, work_done = 0;
152ffe5b 2954 struct be_tx_compl_info *txcp;
c8f64615 2955
152ffe5b
SB
2956 while ((txcp = be_tx_compl_get(txo))) {
2957 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2958 work_done++;
3c8def97 2959
152ffe5b 2960 if (txcp->status) {
512bb8a2 2961 if (lancer_chip(adapter))
152ffe5b 2962 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2963 else
152ffe5b 2964 be_update_tx_err(txo, txcp->status);
512bb8a2 2965 }
10ef9ab4 2966 }
6b7c5b94 2967
10ef9ab4
SP
2968 if (work_done) {
2969 be_cq_notify(adapter, txo->cq.id, true, work_done);
2970 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2971
10ef9ab4
SP
2972 /* As Tx wrbs have been freed up, wake up netdev queue
2973 * if it was stopped due to lack of tx wrbs. */
2974 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2975 be_can_txq_wake(txo)) {
10ef9ab4 2976 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2977 }
10ef9ab4
SP
2978
2979 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2980 tx_stats(txo)->tx_compl += work_done;
2981 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2982 }
10ef9ab4 2983}
6b7c5b94 2984
f7062ee5
SP
2985#ifdef CONFIG_NET_RX_BUSY_POLL
2986static inline bool be_lock_napi(struct be_eq_obj *eqo)
2987{
2988 bool status = true;
2989
2990 spin_lock(&eqo->lock); /* BH is already disabled */
2991 if (eqo->state & BE_EQ_LOCKED) {
2992 WARN_ON(eqo->state & BE_EQ_NAPI);
2993 eqo->state |= BE_EQ_NAPI_YIELD;
2994 status = false;
2995 } else {
2996 eqo->state = BE_EQ_NAPI;
2997 }
2998 spin_unlock(&eqo->lock);
2999 return status;
3000}
3001
3002static inline void be_unlock_napi(struct be_eq_obj *eqo)
3003{
3004 spin_lock(&eqo->lock); /* BH is already disabled */
3005
3006 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3007 eqo->state = BE_EQ_IDLE;
3008
3009 spin_unlock(&eqo->lock);
3010}
3011
3012static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3013{
3014 bool status = true;
3015
3016 spin_lock_bh(&eqo->lock);
3017 if (eqo->state & BE_EQ_LOCKED) {
3018 eqo->state |= BE_EQ_POLL_YIELD;
3019 status = false;
3020 } else {
3021 eqo->state |= BE_EQ_POLL;
3022 }
3023 spin_unlock_bh(&eqo->lock);
3024 return status;
3025}
3026
3027static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3028{
3029 spin_lock_bh(&eqo->lock);
3030
3031 WARN_ON(eqo->state & (BE_EQ_NAPI));
3032 eqo->state = BE_EQ_IDLE;
3033
3034 spin_unlock_bh(&eqo->lock);
3035}
3036
3037static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3038{
3039 spin_lock_init(&eqo->lock);
3040 eqo->state = BE_EQ_IDLE;
3041}
3042
3043static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3044{
3045 local_bh_disable();
3046
3047 /* It's enough to just acquire napi lock on the eqo to stop
 3048	 * be_busy_poll() from processing any queues.
3049 */
3050 while (!be_lock_napi(eqo))
3051 mdelay(1);
3052
3053 local_bh_enable();
3054}
3055
3056#else /* CONFIG_NET_RX_BUSY_POLL */
3057
3058static inline bool be_lock_napi(struct be_eq_obj *eqo)
3059{
3060 return true;
3061}
3062
3063static inline void be_unlock_napi(struct be_eq_obj *eqo)
3064{
3065}
3066
3067static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3068{
3069 return false;
3070}
3071
3072static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3073{
3074}
3075
3076static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3077{
3078}
3079
3080static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3081{
3082}
3083#endif /* CONFIG_NET_RX_BUSY_POLL */
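/* State sketch of the eqo->lock arbitration above (illustrative):
 *
 *	BE_EQ_IDLE --be_lock_napi()------> BE_EQ_NAPI --be_unlock_napi()--> IDLE
 *	BE_EQ_IDLE --be_lock_busy_poll()-> BE_EQ_POLL --be_unlock_busy_poll()
 *
 * A contender that finds BE_EQ_LOCKED set records BE_EQ_NAPI_YIELD or
 * BE_EQ_POLL_YIELD and backs off (be_poll then consumes the whole budget;
 * be_busy_poll returns LL_FLUSH_BUSY).
 */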
3084
68d7bdcb 3085int be_poll(struct napi_struct *napi, int budget)
10ef9ab4
SP
3086{
3087 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3088 struct be_adapter *adapter = eqo->adapter;
0b545a62 3089 int max_work = 0, work, i, num_evts;
6384a4d0 3090 struct be_rx_obj *rxo;
a4906ea0 3091 struct be_tx_obj *txo;
20947770 3092 u32 mult_enc = 0;
f31e50a8 3093
0b545a62
SP
3094 num_evts = events_get(eqo);
3095
a4906ea0
SP
3096 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3097 be_process_tx(adapter, txo, i);
f31e50a8 3098
6384a4d0
SP
3099 if (be_lock_napi(eqo)) {
3100 /* This loop will iterate twice for EQ0 in which
3101 * completions of the last RXQ (default one) are also processed
3102 * For other EQs the loop iterates only once
3103 */
3104 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3105 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3106 max_work = max(work, max_work);
3107 }
3108 be_unlock_napi(eqo);
3109 } else {
3110 max_work = budget;
10ef9ab4 3111 }
6b7c5b94 3112
10ef9ab4
SP
3113 if (is_mcc_eqo(eqo))
3114 be_process_mcc(adapter);
93c86700 3115
10ef9ab4
SP
3116 if (max_work < budget) {
3117 napi_complete(napi);
20947770
PR
3118
 3119		/* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
 3120		 * delay via a delay multiplier encoding value
3121 */
3122 if (skyhawk_chip(adapter))
3123 mult_enc = be_get_eq_delay_mult_enc(eqo);
3124
3125 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3126 mult_enc);
10ef9ab4
SP
3127 } else {
3128 /* As we'll continue in polling mode, count and clear events */
20947770 3129 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3130 }
10ef9ab4 3131 return max_work;
6b7c5b94
SP
3132}
3133
6384a4d0
SP
3134#ifdef CONFIG_NET_RX_BUSY_POLL
3135static int be_busy_poll(struct napi_struct *napi)
3136{
3137 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3138 struct be_adapter *adapter = eqo->adapter;
3139 struct be_rx_obj *rxo;
3140 int i, work = 0;
3141
3142 if (!be_lock_busy_poll(eqo))
3143 return LL_FLUSH_BUSY;
3144
3145 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3146 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3147 if (work)
3148 break;
3149 }
3150
3151 be_unlock_busy_poll(eqo);
3152 return work;
3153}
3154#endif
3155
f67ef7ba 3156void be_detect_error(struct be_adapter *adapter)
7c185276 3157{
e1cfb67a
PR
3158 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3159 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3160 u32 i;
eb0eecc1 3161 struct device *dev = &adapter->pdev->dev;
7c185276 3162
954f6825 3163 if (be_check_error(adapter, BE_ERROR_HW))
72f02485
SP
3164 return;
3165
e1cfb67a
PR
3166 if (lancer_chip(adapter)) {
3167 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3168 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3169 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3170 sliport_err1 = ioread32(adapter->db +
748b539a 3171 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3172 sliport_err2 = ioread32(adapter->db +
748b539a 3173 SLIPORT_ERROR2_OFFSET);
eb0eecc1
SK
 3174			/* Do not log error messages if it's a FW reset */
3175 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3176 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3177 dev_info(dev, "Firmware update in progress\n");
3178 } else {
eb0eecc1
SK
3179 dev_err(dev, "Error detected in the card\n");
3180 dev_err(dev, "ERR: sliport status 0x%x\n",
3181 sliport_status);
3182 dev_err(dev, "ERR: sliport error1 0x%x\n",
3183 sliport_err1);
3184 dev_err(dev, "ERR: sliport error2 0x%x\n",
3185 sliport_err2);
3186 }
e1cfb67a
PR
3187 }
3188 } else {
25848c90
SR
3189 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3190 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3191 ue_lo_mask = ioread32(adapter->pcicfg +
3192 PCICFG_UE_STATUS_LOW_MASK);
3193 ue_hi_mask = ioread32(adapter->pcicfg +
3194 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3195
f67ef7ba
PR
3196 ue_lo = (ue_lo & ~ue_lo_mask);
3197 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3198
eb0eecc1
SK
3199 /* On certain platforms BE hardware can indicate spurious UEs.
3200 * Allow HW to stop working completely in case of a real UE.
3201 * Hence not setting the hw_error for UE detection.
3202 */
f67ef7ba 3203
eb0eecc1 3204 if (ue_lo || ue_hi) {
eb0eecc1
SK
3205 dev_err(dev,
3206 "Unrecoverable Error detected in the adapter");
3207 dev_err(dev, "Please reboot server to recover");
3208 if (skyhawk_chip(adapter))
954f6825
VD
3209 be_set_error(adapter, BE_ERROR_UE);
3210
eb0eecc1
SK
3211 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3212 if (ue_lo & 1)
3213 dev_err(dev, "UE: %s bit set\n",
3214 ue_status_low_desc[i]);
3215 }
3216 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3217 if (ue_hi & 1)
3218 dev_err(dev, "UE: %s bit set\n",
3219 ue_status_hi_desc[i]);
3220 }
7c185276
AK
3221 }
3222 }
7c185276
AK
3223}
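/* Example (illustrative): if ue_lo reads 0x5 after masking, bits 0 and 2 are
 * set and the loop logs ue_status_low_desc[0] and ue_status_low_desc[2] as
 * the blocks reporting an unrecoverable error.
 */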
3224
8d56ff11
SP
3225static void be_msix_disable(struct be_adapter *adapter)
3226{
ac6a0c4a 3227 if (msix_enabled(adapter)) {
8d56ff11 3228 pci_disable_msix(adapter->pdev);
ac6a0c4a 3229 adapter->num_msix_vec = 0;
68d7bdcb 3230 adapter->num_msix_roce_vec = 0;
3abcdeda
SP
3231 }
3232}
3233
c2bba3df 3234static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3235{
7dc4c064 3236 int i, num_vec;
d379142b 3237 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3238
92bf14ab
SP
3239 /* If RoCE is supported, program the max number of NIC vectors that
3240 * may be configured via set-channels, along with vectors needed for
3241 * RoCe. Else, just program the number we'll use initially.
3242 */
3243 if (be_roce_supported(adapter))
3244 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3245 2 * num_online_cpus());
3246 else
3247 num_vec = adapter->cfg_num_qs;
3abcdeda 3248
ac6a0c4a 3249 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
3250 adapter->msix_entries[i].entry = i;
3251
7dc4c064
AG
3252 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3253 MIN_MSIX_VECTORS, num_vec);
3254 if (num_vec < 0)
3255 goto fail;
92bf14ab 3256
92bf14ab
SP
3257 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3258 adapter->num_msix_roce_vec = num_vec / 2;
3259 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3260 adapter->num_msix_roce_vec);
3261 }
3262
3263 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3264
3265 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3266 adapter->num_msix_vec);
c2bba3df 3267 return 0;
7dc4c064
AG
3268
3269fail:
3270 dev_warn(dev, "MSIx enable failed\n");
3271
3272 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3273 if (be_virtfn(adapter))
7dc4c064
AG
3274 return num_vec;
3275 return 0;
6b7c5b94
SP
3276}
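/* Example (illustrative): on a RoCE-capable adapter with be_max_eqs() = 16
 * and 8 online CPUs, num_vec = min(32, 16) = 16; if the PCI core grants all
 * 16 vectors, they are split evenly: num_msix_roce_vec = 8 and
 * num_msix_vec = 8 for the NIC queues.
 */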
3277
fe6d2a38 3278static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3279 struct be_eq_obj *eqo)
b628bde2 3280{
f2f781a7 3281 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3282}
6b7c5b94 3283
b628bde2
SP
3284static int be_msix_register(struct be_adapter *adapter)
3285{
10ef9ab4
SP
3286 struct net_device *netdev = adapter->netdev;
3287 struct be_eq_obj *eqo;
3288 int status, i, vec;
6b7c5b94 3289
10ef9ab4
SP
3290 for_all_evt_queues(adapter, eqo, i) {
3291 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3292 vec = be_msix_vec_get(adapter, eqo);
3293 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3294 if (status)
3295 goto err_msix;
3296
3297 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3298 }
b628bde2 3299
6b7c5b94 3300 return 0;
3abcdeda 3301err_msix:
3302 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3303 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3304 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3305 status);
ac6a0c4a 3306 be_msix_disable(adapter);
3307 return status;
3308}
3309
3310static int be_irq_register(struct be_adapter *adapter)
3311{
3312 struct net_device *netdev = adapter->netdev;
3313 int status;
3314
ac6a0c4a 3315 if (msix_enabled(adapter)) {
3316 status = be_msix_register(adapter);
3317 if (status == 0)
3318 goto done;
ba343c77 3319 /* INTx is not supported for VF */
18c57c74 3320 if (be_virtfn(adapter))
ba343c77 3321 return status;
3322 }
3323
e49cc34f 3324 /* INTx: only the first EQ is used */
3325 netdev->irq = adapter->pdev->irq;
3326 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3327 &adapter->eq_obj[0]);
3328 if (status) {
3329 dev_err(&adapter->pdev->dev,
3330 "INTx request IRQ failed - err %d\n", status);
3331 return status;
3332 }
3333done:
3334 adapter->isr_registered = true;
3335 return 0;
3336}
3337
3338static void be_irq_unregister(struct be_adapter *adapter)
3339{
3340 struct net_device *netdev = adapter->netdev;
10ef9ab4 3341 struct be_eq_obj *eqo;
d658d98a 3342 int i, vec;
3343
3344 if (!adapter->isr_registered)
3345 return;
3346
3347 /* INTx */
ac6a0c4a 3348 if (!msix_enabled(adapter)) {
e49cc34f 3349 free_irq(netdev->irq, &adapter->eq_obj[0]);
3350 goto done;
3351 }
3352
3353 /* MSIx */
3354 for_all_evt_queues(adapter, eqo, i) {
3355 vec = be_msix_vec_get(adapter, eqo);
3356 irq_set_affinity_hint(vec, NULL);
3357 free_irq(vec, eqo);
3358 }
3abcdeda 3359
3360done:
3361 adapter->isr_registered = false;
3362}
3363
10ef9ab4 3364static void be_rx_qs_destroy(struct be_adapter *adapter)
3365{
3366 struct be_queue_info *q;
3367 struct be_rx_obj *rxo;
3368 int i;
3369
3370 for_all_rx_queues(adapter, rxo, i) {
3371 q = &rxo->q;
3372 if (q->created) {
3373 /* If RXQs are destroyed while in an "out of buffer"
3374 * state, there is a possibility of an HW stall on
3375 * Lancer. So, post 64 buffers to each queue to relieve
3376 * the "out of buffer" condition.
3377 * Make sure there's space in the RXQ before posting.
3378 */
3379 if (lancer_chip(adapter)) {
3380 be_rx_cq_clean(rxo);
3381 if (atomic_read(&q->used) == 0)
3382 be_post_rx_frags(rxo, GFP_KERNEL,
3383 MAX_RX_POST);
3384 }
3385
482c9e79 3386 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3387 be_rx_cq_clean(rxo);
99b44304 3388 be_rxq_clean(rxo);
482c9e79 3389 }
10ef9ab4 3390 be_queue_free(adapter, q);
3391 }
3392}
3393
3394static void be_disable_if_filters(struct be_adapter *adapter)
3395{
3396 be_cmd_pmac_del(adapter, adapter->if_handle,
3397 adapter->pmac_id[0], 0);
3398
3399 be_clear_uc_list(adapter);
3400
3401 /* The IFACE flags are enabled in the open path and cleared
3402 * in the close path. When a VF gets detached from the host and
3403 * assigned to a VM the following happens:
3404 * - VF's IFACE flags get cleared in the detach path
3405 * - IFACE create is issued by the VF in the attach path
3406 * Due to a bug in the BE3/Skyhawk-R FW
3407 * (Lancer FW doesn't have the bug), the IFACE capability flags
3408 * specified along with the IFACE create cmd issued by a VF are not
3409 * honoured by FW. As a consequence, if a *new* driver
3410 * (that enables/disables IFACE flags in open/close)
3411 * is loaded in the host and an *old* driver is used by a VM/VF,
3412 * the IFACE gets created *without* the needed flags.
3413 * To avoid this, disable RX-filter flags only for Lancer.
3414 */
3415 if (lancer_chip(adapter)) {
3416 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3417 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3418 }
3419}
3420
3421static int be_close(struct net_device *netdev)
3422{
3423 struct be_adapter *adapter = netdev_priv(netdev);
3424 struct be_eq_obj *eqo;
3425 int i;
889cd4b2 3426
3427 /* This protection is needed as be_close() may be called even when the
3428 * adapter is in cleared state (after eeh perm failure)
3429 */
3430 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3431 return 0;
3432
3433 be_disable_if_filters(adapter);
3434
3435 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3436 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3437 napi_disable(&eqo->napi);
3438 be_disable_busy_poll(eqo);
3439 }
71237b6f 3440 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3441 }
3442
3443 be_async_mcc_disable(adapter);
3444
3445 /* Wait for all pending tx completions to arrive so that
3446 * all tx skbs are freed.
3447 */
fba87559 3448 netif_tx_disable(netdev);
6e1f9975 3449 be_tx_compl_clean(adapter);
3450
3451 be_rx_qs_destroy(adapter);
d11a347d 3452
a323d9bf 3453 for_all_evt_queues(adapter, eqo, i) {
3454 if (msix_enabled(adapter))
3455 synchronize_irq(be_msix_vec_get(adapter, eqo));
3456 else
3457 synchronize_irq(netdev->irq);
3458 be_eq_clean(eqo);
3459 }
3460
3461 be_irq_unregister(adapter);
3462
3463 return 0;
3464}
3465
10ef9ab4 3466static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3467{
3468 struct rss_info *rss = &adapter->rss_info;
3469 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3470 struct be_rx_obj *rxo;
e9008ee9 3471 int rc, i, j;
3472
3473 for_all_rx_queues(adapter, rxo, i) {
3474 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3475 sizeof(struct be_eth_rx_d));
3476 if (rc)
3477 return rc;
3478 }
3479
3480 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3481 rxo = default_rxo(adapter);
3482 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3483 rx_frag_size, adapter->if_handle,
3484 false, &rxo->rss_id);
3485 if (rc)
3486 return rc;
3487 }
3488
3489 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3490 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3491 rx_frag_size, adapter->if_handle,
3492 true, &rxo->rss_id);
3493 if (rc)
3494 return rc;
3495 }
3496
3497 if (be_multi_rxq(adapter)) {
71bb8bd0 3498 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3499 for_all_rss_queues(adapter, rxo, i) {
e2557877 3500 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3501 break;
3502 rss->rsstable[j + i] = rxo->rss_id;
3503 rss->rss_queue[j + i] = i;
3504 }
3505 }
3506 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3507 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3508
3509 if (!BEx_chip(adapter))
3510 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3511 RSS_ENABLE_UDP_IPV6;
3512 } else {
3513 /* Disable RSS, if only default RX Q is created */
e2557877 3514 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3515 }
594ad54a 3516
1dcf7b1c 3517 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3518 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
d5d30981 3519 RSS_INDIR_TABLE_LEN, rss_key);
da1388d6 3520 if (rc) {
e2557877 3521 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3522 return rc;
3523 }
3524
1dcf7b1c 3525 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3526
3527 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3528 * which is a queue empty condition
3529 */
10ef9ab4 3530 for_all_rx_queues(adapter, rxo, i)
3531 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3532
3533 return 0;
3534}
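
/* Worked illustration (not driver code): the nested loops above spread the
 * RSS queue ids round-robin across the whole indirection table. With 4 RSS
 * queues whose rss_ids are {3, 4, 5, 6} and RSS_INDIR_TABLE_LEN == 128 the
 * fill below is equivalent; the helper name is hypothetical.
 */
static inline void example_fill_rsstable(u8 *rsstable, const u8 *rss_ids,
					 int num_rss_qs)
{
	int j;

	for (j = 0; j < RSS_INDIR_TABLE_LEN; j++)
		rsstable[j] = rss_ids[j % num_rss_qs];	/* 3,4,5,6,3,4,... */
}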
3535
3536static int be_enable_if_filters(struct be_adapter *adapter)
3537{
3538 int status;
3539
3540 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3541 if (status)
3542 return status;
3543
3544 /* For BE3 VFs, the PF programs the initial MAC address */
3545 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3546 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3547 adapter->if_handle,
3548 &adapter->pmac_id[0], 0);
3549 if (status)
3550 return status;
3551 }
3552
3553 if (adapter->vlans_added)
3554 be_vid_config(adapter);
3555
3556 be_set_rx_mode(adapter->netdev);
3557
3558 return 0;
3559}
3560
3561static int be_open(struct net_device *netdev)
3562{
3563 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3564 struct be_eq_obj *eqo;
3abcdeda 3565 struct be_rx_obj *rxo;
10ef9ab4 3566 struct be_tx_obj *txo;
b236916a 3567 u8 link_status;
3abcdeda 3568 int status, i;
5fb379ee 3569
10ef9ab4 3570 status = be_rx_qs_create(adapter);
3571 if (status)
3572 goto err;
3573
3574 status = be_enable_if_filters(adapter);
3575 if (status)
3576 goto err;
3577
3578 status = be_irq_register(adapter);
3579 if (status)
3580 goto err;
5fb379ee 3581
10ef9ab4 3582 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3583 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3584
3585 for_all_tx_queues(adapter, txo, i)
3586 be_cq_notify(adapter, txo->cq.id, true, 0);
3587
3588 be_async_mcc_enable(adapter);
3589
3590 for_all_evt_queues(adapter, eqo, i) {
3591 napi_enable(&eqo->napi);
6384a4d0 3592 be_enable_busy_poll(eqo);
20947770 3593 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3594 }
04d3d624 3595 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3596
323ff71e 3597 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3598 if (!status)
3599 be_link_status_update(adapter, link_status);
3600
fba87559 3601 netif_tx_start_all_queues(netdev);
c5abe7c0 3602#ifdef CONFIG_BE2NET_VXLAN
3603 if (skyhawk_chip(adapter))
3604 vxlan_get_rx_port(netdev);
3605#endif
3606
3607 return 0;
3608err:
3609 be_close(adapter->netdev);
3610 return -EIO;
3611}
3612
3613static int be_setup_wol(struct be_adapter *adapter, bool enable)
3614{
145155e7 3615 struct device *dev = &adapter->pdev->dev;
71d8d1b5 3616 struct be_dma_mem cmd;
71d8d1b5 3617 u8 mac[ETH_ALEN];
145155e7 3618 int status;
71d8d1b5 3619
c7bf7169 3620 eth_zero_addr(mac);
3621
3622 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
145155e7 3623 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
ddf1169f 3624 if (!cmd.va)
6b568689 3625 return -ENOMEM;
3626
3627 if (enable) {
3628 status = pci_write_config_dword(adapter->pdev,
3629 PCICFG_PM_CONTROL_OFFSET,
3630 PCICFG_PM_CONTROL_MASK);
71d8d1b5 3631 if (status) {
3632 dev_err(dev, "Could not enable Wake-on-lan\n");
3633 goto err;
71d8d1b5 3634 }
71d8d1b5 3635 } else {
145155e7 3636 ether_addr_copy(mac, adapter->netdev->dev_addr);
3637 }
3638
3639 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3640 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3641 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3642err:
3643 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
3644 return status;
3645}
3646
3647static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3648{
3649 u32 addr;
3650
3651 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3652
3653 mac[5] = (u8)(addr & 0xFF);
3654 mac[4] = (u8)((addr >> 8) & 0xFF);
3655 mac[3] = (u8)((addr >> 16) & 0xFF);
3656 /* Use the OUI from the current MAC address */
3657 memcpy(mac, adapter->netdev->dev_addr, 3);
3658}
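
/* Worked example (illustrative): with a PF MAC of 00:90:fa:12:34:56 and a
 * jhash() result of 0xaabbccdd, the seed MAC keeps the 3-byte OUI and takes
 * the low 24 bits of the hash, giving 00:90:fa:bb:cc:dd.
 * be_vf_eth_addr_config() below then hands out 00:90:fa:bb:cc:dd,
 * 00:90:fa:bb:cc:de, ... to successive VFs by bumping mac[5].
 */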
3659
3660/*
3661 * Generate a seed MAC address from the PF MAC Address using jhash.
3662 * MAC addresses for VFs are assigned incrementally starting from the seed.
3663 * These addresses are programmed in the ASIC by the PF and the VF driver
3664 * queries for the MAC address during its probe.
3665 */
4c876616 3666static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3667{
f9449ab7 3668 u32 vf;
3abcdeda 3669 int status = 0;
6d87f5c3 3670 u8 mac[ETH_ALEN];
11ac75ed 3671 struct be_vf_cfg *vf_cfg;
3672
3673 be_vf_eth_addr_generate(adapter, mac);
3674
11ac75ed 3675 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3676 if (BEx_chip(adapter))
590c391d 3677 status = be_cmd_pmac_add(adapter, mac,
3678 vf_cfg->if_handle,
3679 &vf_cfg->pmac_id, vf + 1);
3680 else
3681 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3682 vf + 1);
590c391d 3683
3684 if (status)
3685 dev_err(&adapter->pdev->dev,
3686 "Mac address assignment failed for VF %d\n",
3687 vf);
6d87f5c3 3688 else
11ac75ed 3689 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3690
3691 mac[5] += 1;
3692 }
3693 return status;
3694}
3695
3696static int be_vfs_mac_query(struct be_adapter *adapter)
3697{
3698 int status, vf;
3699 u8 mac[ETH_ALEN];
3700 struct be_vf_cfg *vf_cfg;
3701
3702 for_all_vfs(adapter, vf_cfg, vf) {
3703 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3704 mac, vf_cfg->if_handle,
3705 false, vf+1);
3706 if (status)
3707 return status;
3708 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3709 }
3710 return 0;
3711}
3712
f9449ab7 3713static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3714{
11ac75ed 3715 struct be_vf_cfg *vf_cfg;
3716 u32 vf;
3717
257a3feb 3718 if (pci_vfs_assigned(adapter->pdev)) {
3719 dev_warn(&adapter->pdev->dev,
3720 "VFs are assigned to VMs: not disabling VFs\n");
3721 goto done;
3722 }
3723
3724 pci_disable_sriov(adapter->pdev);
3725
11ac75ed 3726 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3727 if (BEx_chip(adapter))
3728 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3729 vf_cfg->pmac_id, vf + 1);
3730 else
3731 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3732 vf + 1);
f9449ab7 3733
3734 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3735 }
3736done:
3737 kfree(adapter->vf_cfg);
3738 adapter->num_vfs = 0;
f174c7ec 3739 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3740}
3741
3742static void be_clear_queues(struct be_adapter *adapter)
3743{
3744 be_mcc_queues_destroy(adapter);
3745 be_rx_cqs_destroy(adapter);
3746 be_tx_queues_destroy(adapter);
3747 be_evt_queues_destroy(adapter);
3748}
3749
68d7bdcb 3750static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3751{
3752 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3753 cancel_delayed_work_sync(&adapter->work);
3754 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3755 }
3756}
3757
3758static void be_cancel_err_detection(struct be_adapter *adapter)
3759{
3760 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3761 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3762 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3763 }
3764}
3765
c5abe7c0 3766#ifdef CONFIG_BE2NET_VXLAN
3767static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3768{
3769 struct net_device *netdev = adapter->netdev;
3770
3771 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3772 be_cmd_manage_iface(adapter, adapter->if_handle,
3773 OP_CONVERT_TUNNEL_TO_NORMAL);
3774
3775 if (adapter->vxlan_port)
3776 be_cmd_set_vxlan_port(adapter, 0);
3777
3778 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3779 adapter->vxlan_port = 0;
3780
3781 netdev->hw_enc_features = 0;
3782 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3783 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3784}
c5abe7c0 3785#endif
c9c47142 3786
3787static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3788{
3789 struct be_resources res = adapter->pool_res;
3790 u16 num_vf_qs = 1;
3791
3792 /* Distribute the queue resources equally among the PF and its VFs.
3793 * Do not distribute queue resources in multi-channel configuration.
3794 */
3795 if (num_vfs && !be_is_mc(adapter)) {
3796 /* If the number of VFs requested is at least 8 less than the max supported,
3797 * assign 8 queue pairs to the PF and divide the remaining
3798 * resources evenly among the VFs
3799 */
3800 if (num_vfs < (be_max_vfs(adapter) - 8))
3801 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3802 else
3803 num_vf_qs = res.max_rss_qs / num_vfs;
3804
3805 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3806 * interfaces per port. Provide RSS on VFs, only if number
3807 * of VFs requested is less than MAX_RSS_IFACES limit.
3808 */
3809 if (num_vfs >= MAX_RSS_IFACES)
3810 num_vf_qs = 1;
3811 }
3812 return num_vf_qs;
3813}
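
/* Worked example (illustrative): on a Skyhawk PF with res.max_rss_qs == 32
 * and be_max_vfs() == 32, a request for 4 VFs satisfies 4 < (32 - 8), so
 * each VF gets (32 - 8) / 4 = 6 queue pairs and 8 stay with the PF; a
 * request for 30 VFs falls to the else branch and yields 32 / 30 = 1 queue
 * pair each, and any request of MAX_RSS_IFACES or more is capped at 1.
 */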
3814
3815static int be_clear(struct be_adapter *adapter)
3816{
3817 struct pci_dev *pdev = adapter->pdev;
3818 u16 num_vf_qs;
3819
68d7bdcb 3820 be_cancel_worker(adapter);
191eb756 3821
11ac75ed 3822 if (sriov_enabled(adapter))
3823 be_vf_clear(adapter);
3824
3825 /* Re-configure FW to distribute resources evenly across max-supported
3826 * number of VFs, only when VFs are not already enabled.
3827 */
3828 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3829 !pci_vfs_assigned(pdev)) {
3830 num_vf_qs = be_calculate_vf_qs(adapter,
3831 pci_sriov_get_totalvfs(pdev));
bec84e6b 3832 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3833 pci_sriov_get_totalvfs(pdev),
3834 num_vf_qs);
3835 }
bec84e6b 3836
c5abe7c0 3837#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3838 be_disable_vxlan_offloads(adapter);
c5abe7c0 3839#endif
3840 kfree(adapter->pmac_id);
3841 adapter->pmac_id = NULL;
fbc13f01 3842
f9449ab7 3843 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3844
7707133c 3845 be_clear_queues(adapter);
a54769f5 3846
10ef9ab4 3847 be_msix_disable(adapter);
e1ad8e33 3848 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3849 return 0;
3850}
3851
4c876616 3852static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3853{
92bf14ab 3854 struct be_resources res = {0};
bcc84140 3855 u32 cap_flags, en_flags, vf;
4c876616 3856 struct be_vf_cfg *vf_cfg;
0700d816 3857 int status;
abb93951 3858
0700d816 3859 /* If a FW profile exists, then cap_flags are updated */
4c876616 3860 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3861 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3862
4c876616 3863 for_all_vfs(adapter, vf_cfg, vf) {
3864 if (!BE3_chip(adapter)) {
3865 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3866 RESOURCE_LIMITS,
92bf14ab 3867 vf + 1);
435452aa 3868 if (!status) {
92bf14ab 3869 cap_flags = res.if_cap_flags;
3870 /* Prevent VFs from enabling VLAN promiscuous
3871 * mode
3872 */
3873 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3874 }
92bf14ab 3875 }
4c876616 3876
3877 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3878 BE_IF_FLAGS_BROADCAST |
3879 BE_IF_FLAGS_MULTICAST |
3880 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3881 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3882 &vf_cfg->if_handle, vf + 1);
4c876616 3883 if (status)
0700d816 3884 return status;
4c876616 3885 }
3886
3887 return 0;
3888}
3889
39f1d94d 3890static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3891{
11ac75ed 3892 struct be_vf_cfg *vf_cfg;
3893 int vf;
3894
3895 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3896 GFP_KERNEL);
3897 if (!adapter->vf_cfg)
3898 return -ENOMEM;
3899
3900 for_all_vfs(adapter, vf_cfg, vf) {
3901 vf_cfg->if_handle = -1;
3902 vf_cfg->pmac_id = -1;
30128031 3903 }
39f1d94d 3904 return 0;
3905}
3906
3907static int be_vf_setup(struct be_adapter *adapter)
3908{
c502224e 3909 struct device *dev = &adapter->pdev->dev;
11ac75ed 3910 struct be_vf_cfg *vf_cfg;
4c876616 3911 int status, old_vfs, vf;
e7bcbd7b 3912 bool spoofchk;
39f1d94d 3913
257a3feb 3914 old_vfs = pci_num_vf(adapter->pdev);
3915
3916 status = be_vf_setup_init(adapter);
3917 if (status)
3918 goto err;
30128031 3919
3920 if (old_vfs) {
3921 for_all_vfs(adapter, vf_cfg, vf) {
3922 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3923 if (status)
3924 goto err;
3925 }
f9449ab7 3926
3927 status = be_vfs_mac_query(adapter);
3928 if (status)
3929 goto err;
3930 } else {
3931 status = be_vfs_if_create(adapter);
3932 if (status)
3933 goto err;
3934
3935 status = be_vf_eth_addr_config(adapter);
3936 if (status)
3937 goto err;
3938 }
f9449ab7 3939
11ac75ed 3940 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3941 /* Allow VFs to program MAC/VLAN filters */
3942 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3943 vf + 1);
3944 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3945 status = be_cmd_set_fn_privileges(adapter,
435452aa 3946 vf_cfg->privileges |
3947 BE_PRIV_FILTMGMT,
3948 vf + 1);
3949 if (!status) {
3950 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3951 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3952 vf);
435452aa 3953 }
3954 }
3955
3956 /* Allow full available bandwidth */
3957 if (!old_vfs)
3958 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3959
3960 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3961 vf_cfg->if_handle, NULL,
3962 &spoofchk);
3963 if (!status)
3964 vf_cfg->spoofchk = spoofchk;
3965
bdce2ad7 3966 if (!old_vfs) {
0599863d 3967 be_cmd_enable_vf(adapter, vf + 1);
3968 be_cmd_set_logical_link_config(adapter,
3969 IFLA_VF_LINK_STATE_AUTO,
3970 vf+1);
3971 }
f9449ab7 3972 }
3973
3974 if (!old_vfs) {
3975 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3976 if (status) {
3977 dev_err(dev, "SRIOV enable failed\n");
3978 adapter->num_vfs = 0;
3979 goto err;
3980 }
3981 }
3982
3983 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3984 return 0;
3985err:
3986 dev_err(dev, "VF setup failed\n");
3987 be_vf_clear(adapter);
3988 return status;
3989}
3990
3991/* Converting function_mode bits on BE3 to SH mc_type enums */
3992
3993static u8 be_convert_mc_type(u32 function_mode)
3994{
66064dbc 3995 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 3996 return vNIC1;
66064dbc 3997 else if (function_mode & QNQ_MODE)
f93f160b
VV
3998 return FLEX10;
3999 else if (function_mode & VNIC_MODE)
4000 return vNIC2;
4001 else if (function_mode & UMC_ENABLED)
4002 return UMC;
4003 else
4004 return MC_NONE;
4005}
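
/* Decode summary (illustrative) of be_convert_mc_type() above:
 *   VNIC_MODE | QNQ_MODE -> vNIC1
 *   QNQ_MODE only        -> FLEX10
 *   VNIC_MODE only       -> vNIC2
 *   UMC_ENABLED          -> UMC
 *   none set             -> MC_NONE
 */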
4006
4007/* On BE2/BE3 FW does not suggest the supported limits */
4008static void BEx_get_resources(struct be_adapter *adapter,
4009 struct be_resources *res)
4010{
bec84e6b 4011 bool use_sriov = adapter->num_vfs ? 1 : 0;
4012
4013 if (be_physfn(adapter))
4014 res->max_uc_mac = BE_UC_PMAC_COUNT;
4015 else
4016 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4017
4018 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4019
4020 if (be_is_mc(adapter)) {
4021 /* Assuming that there are 4 channels per port,
4022 * when multi-channel is enabled
4023 */
4024 if (be_is_qnq_mode(adapter))
4025 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4026 else
4027 /* In a non-qnq multichannel mode, the pvid
4028 * takes up one vlan entry
4029 */
4030 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4031 } else {
92bf14ab 4032 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4033 }
4034
4035 res->max_mcast_mac = BE_MAX_MC;
4036
4037 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4038 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4039 * *only* if it is RSS-capable.
4040 */
4041 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
4042 be_virtfn(adapter) ||
4043 (be_is_mc(adapter) &&
4044 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4045 res->max_tx_qs = 1;
4046 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4047 struct be_resources super_nic_res = {0};
4048
4049 /* On a SuperNIC profile, the driver needs to use the
4050 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4051 */
4052 be_cmd_get_profile_config(adapter, &super_nic_res,
4053 RESOURCE_LIMITS, 0);
4054 /* Some old versions of BE3 FW don't report max_tx_qs value */
4055 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4056 } else {
92bf14ab 4057 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4058 }
4059
4060 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4061 !use_sriov && be_physfn(adapter))
4062 res->max_rss_qs = (adapter->be3_native) ?
4063 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4064 res->max_rx_qs = res->max_rss_qs + 1;
4065
e3dc867c 4066 if (be_physfn(adapter))
d3518e21 4067 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4068 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4069 else
4070 res->max_evt_qs = 1;
4071
4072 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4073 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4074 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4075 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4076}
4077
4078static void be_setup_init(struct be_adapter *adapter)
4079{
4080 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4081 adapter->phy.link_speed = -1;
4082 adapter->if_handle = -1;
4083 adapter->be3_native = false;
f66b7cfd 4084 adapter->if_flags = 0;
4085 if (be_physfn(adapter))
4086 adapter->cmd_privileges = MAX_PRIVILEGES;
4087 else
4088 adapter->cmd_privileges = MIN_PRIVILEGES;
4089}
4090
4091static int be_get_sriov_config(struct be_adapter *adapter)
4092{
bec84e6b 4093 struct be_resources res = {0};
d3d18312 4094 int max_vfs, old_vfs;
bec84e6b 4095
f2858738 4096 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4097
ace40aff 4098 /* Some old versions of BE3 FW don't report max_vfs value */
4099 if (BE3_chip(adapter) && !res.max_vfs) {
4100 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4101 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4102 }
4103
d3d18312 4104 adapter->pool_res = res;
bec84e6b 4105
4106 /* If during previous unload of the driver, the VFs were not disabled,
4107 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4108 * Instead use the TotalVFs value stored in the pci-dev struct.
4109 */
4110 old_vfs = pci_num_vf(adapter->pdev);
4111 if (old_vfs) {
4112 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4113 old_vfs);
4114
4115 adapter->pool_res.max_vfs =
4116 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4117 adapter->num_vfs = old_vfs;
4118 }
4119
4120 return 0;
4121}
4122
4123static void be_alloc_sriov_res(struct be_adapter *adapter)
4124{
4125 int old_vfs = pci_num_vf(adapter->pdev);
4126 u16 num_vf_qs;
4127 int status;
4128
4129 be_get_sriov_config(adapter);
4130
4131 if (!old_vfs)
4132 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4133
4134 /* When the HW is in SRIOV capable configuration, the PF-pool
4135 * resources are given to PF during driver load, if there are no
4136 * old VFs. This facility is not available in BE3 FW.
4137 * Also, this is done by FW in Lancer chip.
4138 */
4139 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4140 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4141 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4142 num_vf_qs);
4143 if (status)
4144 dev_err(&adapter->pdev->dev,
4145 "Failed to optimize SRIOV resources\n");
4146 }
4147}
4148
92bf14ab 4149static int be_get_resources(struct be_adapter *adapter)
abb93951 4150{
4151 struct device *dev = &adapter->pdev->dev;
4152 struct be_resources res = {0};
4153 int status;
abb93951 4154
4155 if (BEx_chip(adapter)) {
4156 BEx_get_resources(adapter, &res);
4157 adapter->res = res;
abb93951
PR
4158 }
4159
4160 /* For Lancer, SH etc read per-function resource limits from FW.
4161 * GET_FUNC_CONFIG returns per function guaranteed limits.
4162 * GET_PROFILE_CONFIG returns PCI-E related limits (PF-pool limits).
4163 */
4164 if (!BEx_chip(adapter)) {
4165 status = be_cmd_get_func_config(adapter, &res);
4166 if (status)
4167 return status;
abb93951 4168
4169 /* If a default RXQ must be created, we'll use up one RSSQ */
4170 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4171 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4172 res.max_rss_qs -= 1;
4173
4174 /* If RoCE may be enabled stash away half the EQs for RoCE */
4175 if (be_roce_supported(adapter))
4176 res.max_evt_qs /= 2;
4177 adapter->res = res;
abb93951 4178 }
4c876616 4179
4180 /* If FW supports RSS default queue, then skip creating non-RSS
4181 * queue for non-IP traffic.
4182 */
4183 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4184 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4185
4186 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4187 be_max_txqs(adapter), be_max_rxqs(adapter),
4188 be_max_rss(adapter), be_max_eqs(adapter),
4189 be_max_vfs(adapter));
4190 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4191 be_max_uc(adapter), be_max_mc(adapter),
4192 be_max_vlans(adapter));
4193
4194 /* Sanitize cfg_num_qs based on HW and platform limits */
4195 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4196 be_max_qs(adapter));
92bf14ab 4197 return 0;
4198}
4199
4200static int be_get_config(struct be_adapter *adapter)
4201{
6b085ba9 4202 int status, level;
542963b7 4203 u16 profile_id;
6b085ba9 4204
e97e3cda 4205 status = be_cmd_query_fw_cfg(adapter);
abb93951 4206 if (status)
92bf14ab 4207 return status;
abb93951 4208
4209 if (BEx_chip(adapter)) {
4210 level = be_cmd_get_fw_log_level(adapter);
4211 adapter->msg_enable =
4212 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4213 }
4214
4215 be_cmd_get_acpi_wol_cap(adapter);
4216
4217 be_cmd_query_port_name(adapter);
4218
4219 if (be_physfn(adapter)) {
4220 status = be_cmd_get_active_profile(adapter, &profile_id);
4221 if (!status)
4222 dev_info(&adapter->pdev->dev,
4223 "Using profile 0x%x\n", profile_id);
962bcb75 4224 }
bec84e6b 4225
4226 status = be_get_resources(adapter);
4227 if (status)
4228 return status;
abb93951 4229
4230 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4231 sizeof(*adapter->pmac_id), GFP_KERNEL);
4232 if (!adapter->pmac_id)
4233 return -ENOMEM;
abb93951 4234
92bf14ab 4235 return 0;
4236}
4237
4238static int be_mac_setup(struct be_adapter *adapter)
4239{
4240 u8 mac[ETH_ALEN];
4241 int status;
4242
4243 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4244 status = be_cmd_get_perm_mac(adapter, mac);
4245 if (status)
4246 return status;
4247
4248 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4249 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4250 }
4251
4252 return 0;
4253}
4254
4255static void be_schedule_worker(struct be_adapter *adapter)
4256{
4257 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4258 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4259}
4260
4261static void be_schedule_err_detection(struct be_adapter *adapter)
4262{
4263 schedule_delayed_work(&adapter->be_err_detection_work,
4264 msecs_to_jiffies(1000));
4265 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4266}
4267
7707133c 4268static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4269{
68d7bdcb 4270 struct net_device *netdev = adapter->netdev;
10ef9ab4 4271 int status;
ba343c77 4272
7707133c 4273 status = be_evt_queues_create(adapter);
4274 if (status)
4275 goto err;
73d540f2 4276
7707133c 4277 status = be_tx_qs_create(adapter);
4278 if (status)
4279 goto err;
10ef9ab4 4280
7707133c 4281 status = be_rx_cqs_create(adapter);
10ef9ab4 4282 if (status)
a54769f5 4283 goto err;
6b7c5b94 4284
7707133c 4285 status = be_mcc_queues_create(adapter);
4286 if (status)
4287 goto err;
4288
4289 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4290 if (status)
4291 goto err;
4292
4293 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4294 if (status)
4295 goto err;
4296
4297 return 0;
4298err:
4299 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4300 return status;
4301}
4302
4303int be_update_queues(struct be_adapter *adapter)
4304{
4305 struct net_device *netdev = adapter->netdev;
4306 int status;
4307
4308 if (netif_running(netdev))
4309 be_close(netdev);
4310
4311 be_cancel_worker(adapter);
4312
4313 /* If any vectors have been shared with RoCE we cannot re-program
4314 * the MSIx table.
4315 */
4316 if (!adapter->num_msix_roce_vec)
4317 be_msix_disable(adapter);
4318
4319 be_clear_queues(adapter);
4320
4321 if (!msix_enabled(adapter)) {
4322 status = be_msix_enable(adapter);
4323 if (status)
4324 return status;
4325 }
4326
4327 status = be_setup_queues(adapter);
4328 if (status)
4329 return status;
4330
4331 be_schedule_worker(adapter);
4332
4333 if (netif_running(netdev))
4334 status = be_open(netdev);
4335
4336 return status;
4337}
4338
4339static inline int fw_major_num(const char *fw_ver)
4340{
4341 int fw_major = 0, i;
4342
4343 i = sscanf(fw_ver, "%d.", &fw_major);
4344 if (i != 1)
4345 return 0;
4346
4347 return fw_major;
4348}
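
/* e.g. fw_major_num("11.0.153.18") returns 11, while a version string that
 * does not begin with "<number>." returns 0 (illustrative note).
 */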
4349
4350/* If any VFs are already enabled don't FLR the PF */
4351static bool be_reset_required(struct be_adapter *adapter)
4352{
4353 return pci_num_vf(adapter->pdev) ? false : true;
4354}
4355
4356/* Wait for the FW to be ready and perform the required initialization */
4357static int be_func_init(struct be_adapter *adapter)
4358{
4359 int status;
4360
4361 status = be_fw_wait_ready(adapter);
4362 if (status)
4363 return status;
4364
4365 if (be_reset_required(adapter)) {
4366 status = be_cmd_reset_function(adapter);
4367 if (status)
4368 return status;
4369
4370 /* Wait for interrupts to quiesce after an FLR */
4371 msleep(100);
4372
4373 /* We can clear all errors when function reset succeeds */
954f6825 4374 be_clear_error(adapter, BE_CLEAR_ALL);
4375 }
4376
4377 /* Tell FW we're ready to fire cmds */
4378 status = be_cmd_fw_init(adapter);
4379 if (status)
4380 return status;
4381
4382 /* Allow interrupts for other ULPs running on NIC function */
4383 be_intr_set(adapter, true);
4384
4385 return 0;
4386}
4387
4388static int be_setup(struct be_adapter *adapter)
4389{
4390 struct device *dev = &adapter->pdev->dev;
bcc84140 4391 u32 en_flags;
4392 int status;
4393
4394 status = be_func_init(adapter);
4395 if (status)
4396 return status;
4397
4398 be_setup_init(adapter);
4399
4400 if (!lancer_chip(adapter))
4401 be_cmd_req_native_mode(adapter);
4402
4403 /* Need to invoke this cmd first to get the PCI Function Number */
4404 status = be_cmd_get_cntl_attributes(adapter);
4405 if (status)
4406 return status;
4407
4408 if (!BE2_chip(adapter) && be_physfn(adapter))
4409 be_alloc_sriov_res(adapter);
4410
7707133c 4411 status = be_get_config(adapter);
10ef9ab4 4412 if (status)
a54769f5 4413 goto err;
6b7c5b94 4414
7707133c 4415 status = be_msix_enable(adapter);
10ef9ab4 4416 if (status)
a54769f5 4417 goto err;
6b7c5b94 4418
4419 /* will enable all the needed filter flags in be_open() */
4420 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4421 en_flags = en_flags & be_if_cap_flags(adapter);
4422 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4423 &adapter->if_handle, 0);
7707133c 4424 if (status)
a54769f5 4425 goto err;
6b7c5b94 4426
4427 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4428 rtnl_lock();
7707133c 4429 status = be_setup_queues(adapter);
68d7bdcb 4430 rtnl_unlock();
95046b92 4431 if (status)
4432 goto err;
4433
7707133c 4434 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4435
4436 status = be_mac_setup(adapter);
4437 if (status)
4438 goto err;
4439
e97e3cda 4440 be_cmd_get_fw_ver(adapter);
acbafeb1 4441 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4442
e9e2a904 4443 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4444 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
4445 adapter->fw_ver);
4446 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4447 }
4448
4449 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4450 adapter->rx_fc);
4451 if (status)
4452 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4453 &adapter->rx_fc);
590c391d 4454
4455 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4456 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4457
4458 if (be_physfn(adapter))
4459 be_cmd_set_logical_link_config(adapter,
4460 IFLA_VF_LINK_STATE_AUTO, 0);
4461
4462 if (adapter->num_vfs)
4463 be_vf_setup(adapter);
f9449ab7 4464
4465 status = be_cmd_get_phy_info(adapter);
4466 if (!status && be_pause_supported(adapter))
4467 adapter->phy.fc_autoneg = 1;
4468
68d7bdcb 4469 be_schedule_worker(adapter);
e1ad8e33 4470 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4471 return 0;
4472err:
4473 be_clear(adapter);
4474 return status;
4475}
6b7c5b94 4476
4477#ifdef CONFIG_NET_POLL_CONTROLLER
4478static void be_netpoll(struct net_device *netdev)
4479{
4480 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4481 struct be_eq_obj *eqo;
4482 int i;
4483
e49cc34f 4484 for_all_evt_queues(adapter, eqo, i) {
20947770 4485 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4486 napi_schedule(&eqo->napi);
4487 }
4488}
4489#endif
4490
96c9b2e4 4491static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4492
4493static bool phy_flashing_required(struct be_adapter *adapter)
4494{
e02cfd96 4495 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4496 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
4497}
4498
4499static bool is_comp_in_ufi(struct be_adapter *adapter,
4500 struct flash_section_info *fsec, int type)
4501{
4502 int i = 0, img_type = 0;
4503 struct flash_section_info_g2 *fsec_g2 = NULL;
4504
ca34fe38 4505 if (BE2_chip(adapter))
4506 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4507
4508 for (i = 0; i < MAX_FLASH_COMP; i++) {
4509 if (fsec_g2)
4510 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4511 else
4512 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4513
4514 if (img_type == type)
4515 return true;
4516 }
4517 return false;
4518
4519}
4520
4188e7df 4521static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
4522 int header_size,
4523 const struct firmware *fw)
4524{
4525 struct flash_section_info *fsec = NULL;
4526 const u8 *p = fw->data;
4527
4528 p += header_size;
4529 while (p < (fw->data + fw->size)) {
4530 fsec = (struct flash_section_info *)p;
4531 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4532 return fsec;
4533 p += 32;
4534 }
4535 return NULL;
4536}
4537
4538static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4539 u32 img_offset, u32 img_size, int hdr_size,
4540 u16 img_optype, bool *crc_match)
4541{
4542 u32 crc_offset;
4543 int status;
4544 u8 crc[4];
4545
4546 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4547 img_size - 4);
4548 if (status)
4549 return status;
4550
4551 crc_offset = hdr_size + img_offset + img_size - 4;
4552
4553 /* Skip flashing, if crc of flashed region matches */
4554 if (!memcmp(crc, p + crc_offset, 4))
4555 *crc_match = true;
4556 else
4557 *crc_match = false;
4558
4559 return status;
4560}
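
/* Layout note (illustrative): the last 4 bytes of each image hold its CRC.
 * be_check_flash_crc() fetches the 4 CRC bytes at the end of the region
 * already in flash and compares them with the new image's CRC located at
 * file offset hdr_size + img_offset + img_size - 4; a match means the
 * image is unchanged and flashing it can be skipped.
 */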
4561
773a2d7c 4562static int be_flash(struct be_adapter *adapter, const u8 *img,
4563 struct be_dma_mem *flash_cmd, int optype, int img_size,
4564 u32 img_offset)
773a2d7c 4565{
70a7b525 4566 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4567 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4568 int status;
773a2d7c 4569
4570 while (total_bytes) {
4571 num_bytes = min_t(u32, 32*1024, total_bytes);
4572
4573 total_bytes -= num_bytes;
4574
4575 if (!total_bytes) {
4576 if (optype == OPTYPE_PHY_FW)
4577 flash_op = FLASHROM_OPER_PHY_FLASH;
4578 else
4579 flash_op = FLASHROM_OPER_FLASH;
4580 } else {
4581 if (optype == OPTYPE_PHY_FW)
4582 flash_op = FLASHROM_OPER_PHY_SAVE;
4583 else
4584 flash_op = FLASHROM_OPER_SAVE;
4585 }
4586
be716446 4587 memcpy(req->data_buf, img, num_bytes);
4588 img += num_bytes;
4589 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
4590 flash_op, img_offset +
4591 bytes_sent, num_bytes);
4c60005f 4592 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
4593 optype == OPTYPE_PHY_FW)
4594 break;
4595 else if (status)
773a2d7c 4596 return status;
4597
4598 bytes_sent += num_bytes;
4599 }
4600 return 0;
4601}
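
/* A minimal sketch (illustrative, not driver code) of the op selection in
 * be_flash() above: every 32KB chunk except the last is sent with a "save"
 * op, and only the final chunk uses the op that actually commits the flash.
 * The helper name is hypothetical.
 */
static inline u32 example_flash_op(u32 bytes_left_after_chunk, bool phy_fw)
{
	if (!bytes_left_after_chunk)	/* final chunk */
		return phy_fw ? FLASHROM_OPER_PHY_FLASH : FLASHROM_OPER_FLASH;

	return phy_fw ? FLASHROM_OPER_PHY_SAVE : FLASHROM_OPER_SAVE;
}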
4602
0ad3157e 4603/* For BE2, BE3 and BE3-R */
ca34fe38 4604static int be_flash_BEx(struct be_adapter *adapter,
4605 const struct firmware *fw,
4606 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4607{
c165541e 4608 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4609 struct device *dev = &adapter->pdev->dev;
c165541e 4610 struct flash_section_info *fsec = NULL;
4611 int status, i, filehdr_size, num_comp;
4612 const struct flash_comp *pflashcomp;
4613 bool crc_match;
4614 const u8 *p;
4615
4616 struct flash_comp gen3_flash_types[] = {
4617 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4618 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4619 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4620 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4621 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4622 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4623 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4624 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4625 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4626 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4627 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4628 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4629 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4630 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4631 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4632 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4633 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4634 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4635 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4636 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4637 };
4638
4639 struct flash_comp gen2_flash_types[] = {
4640 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4641 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4642 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4643 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4644 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4645 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4646 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4647 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4648 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4649 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4650 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4651 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4652 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4653 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4654 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4655 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
4656 };
4657
ca34fe38 4658 if (BE3_chip(adapter)) {
4659 pflashcomp = gen3_flash_types;
4660 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4661 num_comp = ARRAY_SIZE(gen3_flash_types);
4662 } else {
4663 pflashcomp = gen2_flash_types;
4664 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4665 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4666 img_hdrs_size = 0;
84517482 4667 }
ca34fe38 4668
4669 /* Get flash section info */
4670 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4671 if (!fsec) {
96c9b2e4 4672 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
4673 return -1;
4674 }
9fe96934 4675 for (i = 0; i < num_comp; i++) {
c165541e 4676 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4677 continue;
c165541e
PR
4678
4679 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4680 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4681 continue;
4682
773a2d7c
PR
4683 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4684 !phy_flashing_required(adapter))
306f1348 4685 continue;
c165541e 4686
773a2d7c 4687 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4688 status = be_check_flash_crc(adapter, fw->data,
4689 pflashcomp[i].offset,
4690 pflashcomp[i].size,
4691 filehdr_size +
4692 img_hdrs_size,
4693 OPTYPE_REDBOOT, &crc_match);
4694 if (status) {
4695 dev_err(dev,
4696 "Could not get CRC for 0x%x region\n",
4697 pflashcomp[i].optype);
4698 continue;
4699 }
4700
4701 if (crc_match)
4702 continue;
4703 }
c165541e 4704
4705 p = fw->data + filehdr_size + pflashcomp[i].offset +
4706 img_hdrs_size;
4707 if (p + pflashcomp[i].size > fw->data + fw->size)
4708 return -1;
4709
4710 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4711 pflashcomp[i].size, 0);
773a2d7c 4712 if (status) {
96c9b2e4 4713 dev_err(dev, "Flashing section type 0x%x failed\n",
4714 pflashcomp[i].img_type);
4715 return status;
84517482 4716 }
84517482 4717 }
4718 return 0;
4719}
4720
4721static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4722{
4723 u32 img_type = le32_to_cpu(fsec_entry.type);
4724 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4725
4726 if (img_optype != 0xFFFF)
4727 return img_optype;
4728
4729 switch (img_type) {
4730 case IMAGE_FIRMWARE_iSCSI:
4731 img_optype = OPTYPE_ISCSI_ACTIVE;
4732 break;
4733 case IMAGE_BOOT_CODE:
4734 img_optype = OPTYPE_REDBOOT;
4735 break;
4736 case IMAGE_OPTION_ROM_ISCSI:
4737 img_optype = OPTYPE_BIOS;
4738 break;
4739 case IMAGE_OPTION_ROM_PXE:
4740 img_optype = OPTYPE_PXE_BIOS;
4741 break;
4742 case IMAGE_OPTION_ROM_FCoE:
4743 img_optype = OPTYPE_FCOE_BIOS;
4744 break;
4745 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4746 img_optype = OPTYPE_ISCSI_BACKUP;
4747 break;
4748 case IMAGE_NCSI:
4749 img_optype = OPTYPE_NCSI_FW;
4750 break;
4751 case IMAGE_FLASHISM_JUMPVECTOR:
4752 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4753 break;
4754 case IMAGE_FIRMWARE_PHY:
4755 img_optype = OPTYPE_SH_PHY_FW;
4756 break;
4757 case IMAGE_REDBOOT_DIR:
4758 img_optype = OPTYPE_REDBOOT_DIR;
4759 break;
4760 case IMAGE_REDBOOT_CONFIG:
4761 img_optype = OPTYPE_REDBOOT_CONFIG;
4762 break;
4763 case IMAGE_UFI_DIR:
4764 img_optype = OPTYPE_UFI_DIR;
4765 break;
4766 default:
4767 break;
4768 }
4769
4770 return img_optype;
4771}
4772
773a2d7c 4773static int be_flash_skyhawk(struct be_adapter *adapter,
4774 const struct firmware *fw,
4775 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4776{
773a2d7c 4777 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4778 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4779 struct device *dev = &adapter->pdev->dev;
773a2d7c 4780 struct flash_section_info *fsec = NULL;
96c9b2e4 4781 u32 img_offset, img_size, img_type;
70a7b525 4782 u16 img_optype, flash_optype;
96c9b2e4 4783 int status, i, filehdr_size;
96c9b2e4 4784 const u8 *p;
4785
4786 filehdr_size = sizeof(struct flash_file_hdr_g3);
4787 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4788 if (!fsec) {
96c9b2e4 4789 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4790 return -EINVAL;
4791 }
4792
70a7b525 4793retry_flash:
4794 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4795 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4796 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
4797 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4798 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4799 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4800
96c9b2e4 4801 if (img_optype == 0xFFFF)
773a2d7c 4802 continue;
4803
4804 if (flash_offset_support)
4805 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4806 else
4807 flash_optype = img_optype;
4808
4809 /* Don't bother verifying CRC if an old FW image is being
4810 * flashed
4811 */
4812 if (old_fw_img)
4813 goto flash;
4814
4815 status = be_check_flash_crc(adapter, fw->data, img_offset,
4816 img_size, filehdr_size +
70a7b525 4817 img_hdrs_size, flash_optype,
96c9b2e4 4818 &crc_match);
4819 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4820 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4821 /* The current FW image on the card does not support
4822 * OFFSET based flashing. Retry using older mechanism
4823 * of OPTYPE based flashing
4824 */
4825 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4826 flash_offset_support = false;
4827 goto retry_flash;
4828 }
4829
4830 /* The current FW image on the card does not recognize
4831 * the new FLASH op_type. The FW download is partially
4832 * complete. Reboot the server now to enable FW image
4833 * to recognize the new FLASH op_type. To complete the
4834 * remaining process, download the same FW again after
4835 * the reboot.
4836 */
4837 dev_err(dev, "Flash incomplete. Reset the server\n");
4838 dev_err(dev, "Download FW image again after reset\n");
4839 return -EAGAIN;
4840 } else if (status) {
4841 dev_err(dev, "Could not get CRC for 0x%x region\n",
4842 img_optype);
4843 return -EFAULT;
4844 }
4845
4846 if (crc_match)
4847 continue;
773a2d7c 4848
4849flash:
4850 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4851 if (p + img_size > fw->data + fw->size)
4852 return -1;
4853
4854 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4855 img_offset);
4856
4857 /* The current FW image on the card does not support OFFSET
4858 * based flashing. Retry using older mechanism of OPTYPE based
4859 * flashing
4860 */
4861 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4862 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4863 flash_offset_support = false;
4864 goto retry_flash;
4865 }
4866
4867 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4868 * UFI_DIR region
4869 */
4870 if (old_fw_img &&
4871 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4872 (img_optype == OPTYPE_UFI_DIR &&
4873 base_status(status) == MCC_STATUS_FAILED))) {
4874 continue;
4875 } else if (status) {
4876 dev_err(dev, "Flashing section type 0x%x failed\n",
4877 img_type);
4878 return -EFAULT;
4879 }
4880 }
4881 return 0;
4882}
4883
485bf569 4884static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4885 const struct firmware *fw)
84517482 4886{
4887#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4888#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4889 struct device *dev = &adapter->pdev->dev;
84517482 4890 struct be_dma_mem flash_cmd;
4891 const u8 *data_ptr = NULL;
4892 u8 *dest_image_ptr = NULL;
4893 size_t image_size = 0;
4894 u32 chunk_size = 0;
4895 u32 data_written = 0;
4896 u32 offset = 0;
4897 int status = 0;
4898 u8 add_status = 0;
f67ef7ba 4899 u8 change_status;
84517482 4900
485bf569 4901 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4902 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4903 return -EINVAL;
d9efd2af
SB
4904 }
4905
4906 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4907 + LANCER_FW_DOWNLOAD_CHUNK;
4908 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4909 &flash_cmd.dma, GFP_KERNEL);
4910 if (!flash_cmd.va)
4911 return -ENOMEM;
84517482 4912
4913 dest_image_ptr = flash_cmd.va +
4914 sizeof(struct lancer_cmd_req_write_object);
4915 image_size = fw->size;
4916 data_ptr = fw->data;
4917
4918 while (image_size) {
4919 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4920
4921 /* Copy the image chunk content. */
4922 memcpy(dest_image_ptr, data_ptr, chunk_size);
4923
4924 status = lancer_cmd_write_object(adapter, &flash_cmd,
4925 chunk_size, offset,
4926 LANCER_FW_DOWNLOAD_LOCATION,
4927 &data_written, &change_status,
4928 &add_status);
4929 if (status)
4930 break;
4931
4932 offset += data_written;
4933 data_ptr += data_written;
4934 image_size -= data_written;
4935 }
4936
4937 if (!status) {
4938 /* Commit the FW written */
4939 status = lancer_cmd_write_object(adapter, &flash_cmd,
4940 0, offset,
4941 LANCER_FW_DOWNLOAD_LOCATION,
4942 &data_written, &change_status,
4943 &add_status);
4944 }
4945
bb864e07 4946 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4947 if (status) {
bb864e07 4948 dev_err(dev, "Firmware load error\n");
3fb8cb80 4949 return be_cmd_status(status);
4950 }
4951
4952 dev_info(dev, "Firmware flashed successfully\n");
4953
f67ef7ba 4954 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4955 dev_info(dev, "Resetting adapter to activate new FW\n");
4956 status = lancer_physdev_ctrl(adapter,
4957 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4958 if (status) {
4959 dev_err(dev, "Adapter busy, could not reset FW\n");
4960 dev_err(dev, "Reboot server to activate new FW\n");
4961 }
4962 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4963 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4964 }
4965
4966 return 0;
4967}
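
/* Transfer accounting (illustrative note): lancer_cmd_write_object() may
 * accept fewer bytes than requested, so the loop above advances offset and
 * data_ptr by the FW-reported data_written until image_size reaches zero,
 * and then issues a zero-length write at the final offset to commit.
 */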
4968
4969/* Check if the flash image file is compatible with the adapter that
4970 * is being flashed.
4971 */
4972static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4973 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4974{
4975 if (!fhdr) {
4976 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
887a65c4 4977 return false;
5d3acd0d 4978 }
773a2d7c 4979
4980 /* First letter of the build version is used to identify
4981 * which chip this image file is meant for.
4982 */
4983 switch (fhdr->build[0]) {
4984 case BLD_STR_UFI_TYPE_SH:
4985 if (!skyhawk_chip(adapter))
4986 return false;
4987 break;
5d3acd0d 4988 case BLD_STR_UFI_TYPE_BE3:
4989 if (!BE3_chip(adapter))
4990 return false;
4991 break;
5d3acd0d 4992 case BLD_STR_UFI_TYPE_BE2:
4993 if (!BE2_chip(adapter))
4994 return false;
4995 break;
4996 default:
4997 return false;
4998 }
a6e6ff6e 4999
5000 /* In BE3 FW images the "asic_type_rev" field doesn't track the
5001 * asic_rev of the chips it is compatible with.
5002 * When asic_type_rev is 0 the image is compatible only with
5003 * pre-BE3-R chips (asic_rev < 0x10)
5004 */
5005 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
5006 return adapter->asic_rev < 0x10;
5007 else
5008 return (fhdr->asic_type_rev >= adapter->asic_rev);
5009}
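
/* Worked example (illustrative): a UFI whose build string starts with the
 * Skyhawk type character is rejected on BE3 hardware; on a BE3-R adapter
 * (asic_rev >= 0x10) a BE3 UFI with asic_type_rev == 0 is also rejected,
 * since such images are only compatible with pre-BE3-R (asic_rev < 0x10)
 * chips.
 */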
5010
5011static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
5012{
5d3acd0d 5013 struct device *dev = &adapter->pdev->dev;
485bf569 5014 struct flash_file_hdr_g3 *fhdr3;
5015 struct image_hdr *img_hdr_ptr;
5016 int status = 0, i, num_imgs;
485bf569 5017 struct be_dma_mem flash_cmd;
84517482 5018
5d3acd0d
VV
5019 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
5020 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
5021 dev_err(dev, "Flash image is not compatible with adapter\n");
5022 return -EINVAL;
5023 }
5024
5d3acd0d 5025 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
5026 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
5027 GFP_KERNEL);
5028 if (!flash_cmd.va)
5029 return -ENOMEM;
773a2d7c 5030
5031 num_imgs = le32_to_cpu(fhdr3->num_imgs);
5032 for (i = 0; i < num_imgs; i++) {
5033 img_hdr_ptr = (struct image_hdr *)(fw->data +
5034 (sizeof(struct flash_file_hdr_g3) +
5035 i * sizeof(struct image_hdr)));
5d3acd0d
VV
5036 if (!BE2_chip(adapter) &&
5037 le32_to_cpu(img_hdr_ptr->imageid) != 1)
5038 continue;
84517482 5039
5d3acd0d
VV
5040 if (skyhawk_chip(adapter))
5041 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
5042 num_imgs);
5043 else
5044 status = be_flash_BEx(adapter, fw, &flash_cmd,
5045 num_imgs);
84517482
AK
5046 }
5047
5d3acd0d
VV
5048 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
5049 if (!status)
5050 dev_info(dev, "Firmware flashed successfully\n");
84517482 5051
485bf569
SN
5052 return status;
5053}
5054
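/* Layout assumed by be_fw_download() above: a UFI file starts with a
 * flash_file_hdr_g3, followed by num_imgs image_hdr entries; flashing of
 * each section is delegated to be_flash_skyhawk() or be_flash_BEx()
 * depending on the chip family.
 */
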
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

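/* be_load_fw() is the common entry point for a firmware-flash request:
 * it fetches the image via request_firmware(), dispatches to the Lancer
 * object-write path or the legacy UFI/flashrom path, and refreshes the
 * cached FW version string on success.
 */
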
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other
 * (non-VxLAN) tunnels are configured while VxLAN offloads are enabled,
 * offloads for those other tunnels are unexported on the fly through
 * ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

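/* Port bookkeeping used above: vxlan_port_count tracks how many VxLAN
 * ports the stack has asked for, while vxlan_port_aliases counts repeat
 * adds of the one offloaded port. Offloads stay enabled only while a
 * single distinct port is configured; a second distinct port disables
 * them until the count drops back to zero.
 */
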
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non-tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * to allow other tunneled traffic, such as GRE, to work while VxLAN
	 * offloads are configured on Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
#endif

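/* The check above identifies a VxLAN frame purely by shape: an outer UDP
 * L4 header, an inner Ethernet frame (ETH_P_TEB), and exactly
 * sizeof(udphdr) + sizeof(vxlanhdr) bytes between the transport and
 * inner MAC headers. Anything else loses checksum and GSO offloads for
 * that skb only; the netdev feature flags themselves are untouched.
 */
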
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

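/* The physical port ID built above is one byte of (hba_port_num + 1)
 * followed by the controller serial-number words copied in reverse
 * order, so ports on the same adapter share a common serial suffix and
 * differ only in the leading byte.
 */
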
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

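/* The detection task above re-arms itself after every pass, except on a
 * PF whose recovery attempt just failed; VFs always keep retrying, since
 * their recovery is expected to follow the PF's.
 */
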
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

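/* The worker above runs once a second. Periodic sub-tasks key off
 * work_counter with MODULO(), which is why be_get_temp_freq must be a
 * power of two (see be_drv_init() below).
 */
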
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

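/* BAR usage as recovered from the code above: CSR is BAR 2 on BEx PFs;
 * the doorbell BAR is 0 on Lancer and on VFs, 4 otherwise (db_bar());
 * PCICFG is BAR 1 on BE2 but BAR 0 on BE3/Skyhawk PFs, while VFs reach
 * PCICFG through an offset into the doorbell BAR instead.
 */
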
static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

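/* Alignment detail from be_drv_init() above: the mailbox buffer is
 * over-allocated by 16 bytes so PTR_ALIGN() can round both the CPU and
 * DMA addresses up to a 16-byte boundary, presumably a requirement of
 * the mailbox interface (an inference from the code, not a documented
 * constraint here).
 */
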
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		break;
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On-die temperature is not supported on VFs */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

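/* Probe ordering above, for reference: PCI enable -> BAR mapping ->
 * driver state (be_drv_init) -> HW setup (be_setup) -> netdev
 * registration, with the error labels unwinding in exactly the reverse
 * order.
 */
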
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/* An FLR will stop BE from DMAing any data. */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress can cause
	 * it not to recover, so wait for the dump to finish. Wait only on
	 * the first function, as this is needed only once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

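/* EEH recovery path, as implemented above: err_detected detaches and
 * cleans up (waiting out a possible flash debug dump), slot_reset
 * re-enables the device and waits for FW readiness, and resume rebuilds
 * the adapter via be_resume() and re-arms error detection.
 */
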
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in an SRIOV-capable configuration, the PF-pool
	 * resources are equally distributed across the max number of VFs.
	 * The user may request only a subset of the max VFs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so
	 * that each VF has access to a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in the Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}

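/* be_pci_sriov_configure() backs the standard sriov_numvfs sysfs knob:
 * writing 0 tears the VFs down, and writing N redistributes the PF-pool
 * queues (on Skyhawk) before enabling N VFs. On success it returns the
 * number of VFs actually enabled, as the PCI core expects.
 */
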
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);