/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
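
/* Free the DMA-coherent memory backing a queue ring, if it was allocated */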
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}
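
/* Allocate zeroed DMA-coherent memory for a ring of 'len' entries */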
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
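
/* Enable/disable host interrupts by flipping the HOSTINTR bit in the
 * membar control register via PCI config space; the register is written
 * only when the bit actually needs to change.
 */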
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
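
/* Try the FW cmd first to toggle interrupts; on failure fall back to the
 * PCI config-space register (not applicable on Lancer).
 */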
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
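
/* The be_*q_notify() routines below write doorbell registers to tell the
 * adapter how many entries were posted to (or popped from) a queue.
 */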
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
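
/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * accumulator.
 */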
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
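
/* Pick the vlan tag for the tx WRB; if the OS-given priority is not in the
 * adapter's available priority bmap, substitute the recommended priority.
 */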
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}
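
/* TXQ occupancy checks used for flow control in the transmit path */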
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}
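
/* Translate skb offload state (GSO, csum, vlan) into WRB feature flags */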
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
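
/* Unmap the DMA address/length recorded in a tx WRB */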
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u16 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
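
/* True once the QnQ async event has been received from FW */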
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
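
/* Insert the vlan tag (and the outer QnQ tag, if configured) into the
 * packet data itself, instead of using HW vlan insertion.
 */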
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround: setting skip_hw_vlan = 1 informs the F/W
		 * to skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
			VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	/* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
	 * less may cause a transmit stall on that port. So the work-around is
	 * to pad short packets (<= 32 bytes) to a 36-byte length.
	 */
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	return skb;
}

static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there is an odd number of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}

/* OS2BMC related */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
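
/* Returns true if the given skb must also be forwarded to the BMC, based
 * on the OS2BMC filters configured in FW.
 */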
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (udp->dest) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For vlan packets destined to the BMC, the asic expects the
	 * vlan tag to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
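
/* The driver's ndo_start_xmit handler */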
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

static inline bool be_in_all_promisc(struct be_adapter *adapter)
{
	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
			BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}

static int be_clear_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
	if (!status) {
		dev_info(dev, "Disabling VLAN promiscuous mode\n");
		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}
	return status;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}

static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}

static void be_set_mc_list(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
	else
		be_set_mc_promisc(adapter);
}

static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}

static void be_clear_uc_list(struct be_adapter *adapter)
{
	int i;

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
ba343c77
SB
1563static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1564{
1565 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1566 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
ba343c77
SB
1567 int status;
1568
11ac75ed 1569 if (!sriov_enabled(adapter))
ba343c77
SB
1570 return -EPERM;
1571
11ac75ed 1572 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
ba343c77
SB
1573 return -EINVAL;
1574
3c31aaf3
VV
1575 /* Proceed further only if user provided MAC is different
1576 * from active MAC
1577 */
1578 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1579 return 0;
1580
3175d8c2
SP
1581 if (BEx_chip(adapter)) {
1582 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1583 vf + 1);
ba343c77 1584
11ac75ed
SP
1585 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1586 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
1587 } else {
1588 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1589 vf + 1);
590c391d
PR
1590 }
1591
abccf23e
KA
1592 if (status) {
1593 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1594 mac, vf, status);
1595 return be_cmd_status(status);
1596 }
64600ea5 1597
1598 ether_addr_copy(vf_cfg->mac_addr, mac);
1599
1600 return 0;
1601}
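/* Reached via the .ndo_set_vf_mac hook, typically from userspace with
 * something like "ip link set <pf-ifname> vf 0 mac 02:00:00:00:00:01".
 * BEx chips must delete and re-add the PMAC entry; newer chips can
 * overwrite the MAC in place with a single SET_MAC command.
 */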
1602
64600ea5 1603static int be_get_vf_config(struct net_device *netdev, int vf,
748b539a 1604 struct ifla_vf_info *vi)
1605{
1606 struct be_adapter *adapter = netdev_priv(netdev);
11ac75ed 1607 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
64600ea5 1608
11ac75ed 1609 if (!sriov_enabled(adapter))
1610 return -EPERM;
1611
11ac75ed 1612 if (vf >= adapter->num_vfs)
1613 return -EINVAL;
1614
1615 vi->vf = vf;
1616 vi->max_tx_rate = vf_cfg->tx_rate;
1617 vi->min_tx_rate = 0;
1618 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1619 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
11ac75ed 1620 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
bdce2ad7 1621 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
e7bcbd7b 1622 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1623
1624 return 0;
1625}
1626
1627static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1628{
1629 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1630 u16 vids[BE_NUM_VLANS_SUPPORTED];
1631 int vf_if_id = vf_cfg->if_handle;
1632 int status;
1633
1634 /* Enable Transparent VLAN Tagging */
e7bcbd7b 1635 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1636 if (status)
1637 return status;
1638
1639 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1640 vids[0] = 0;
1641 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1642 if (!status)
1643 dev_info(&adapter->pdev->dev,
1644 "Cleared guest VLANs on VF%d", vf);
1645
1646 /* After TVT is enabled, disallow VFs to program VLAN filters */
1647 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1648 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1649 ~BE_PRIV_FILTMGMT, vf + 1);
1650 if (!status)
1651 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1652 }
1653 return 0;
1654}
1655
1656static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1657{
1658 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1659 struct device *dev = &adapter->pdev->dev;
1660 int status;
1661
1662 /* Reset Transparent VLAN Tagging. */
1663 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
e7bcbd7b 1664 vf_cfg->if_handle, 0, 0);
1665 if (status)
1666 return status;
1667
1668 /* Allow VFs to program VLAN filtering */
1669 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1670 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1671 BE_PRIV_FILTMGMT, vf + 1);
1672 if (!status) {
1673 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1674 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1675 }
1676 }
1677
1678 dev_info(dev,
1679 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1680 return 0;
1681}
1682
748b539a 1683static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1684{
1685 struct be_adapter *adapter = netdev_priv(netdev);
b9fc0e53 1686 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
435452aa 1687 int status;
1da87b7f 1688
11ac75ed 1689 if (!sriov_enabled(adapter))
1690 return -EPERM;
1691
b9fc0e53 1692 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1693 return -EINVAL;
1694
1695 if (vlan || qos) {
1696 vlan |= qos << VLAN_PRIO_SHIFT;
435452aa 1697 status = be_set_vf_tvt(adapter, vf, vlan);
1da87b7f 1698 } else {
435452aa 1699 status = be_clear_vf_tvt(adapter, vf);
1700 }
1701
1702 if (status) {
1703 dev_err(&adapter->pdev->dev,
1704 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1705 status);
1706 return be_cmd_status(status);
1707 }
1708
1709 vf_cfg->vlan_tag = vlan;
abccf23e 1710 return 0;
1711}
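/* Reached via .ndo_set_vf_vlan, e.g. "ip link set <pf-ifname> vf 0
 * vlan 100 qos 3". A non-zero vid/qos enables transparent tagging
 * with tag = vid | (qos << VLAN_PRIO_SHIFT); "vlan 0 qos 0" clears
 * it again via be_clear_vf_tvt().
 */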
1712
1713static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1714 int min_tx_rate, int max_tx_rate)
1715{
1716 struct be_adapter *adapter = netdev_priv(netdev);
1717 struct device *dev = &adapter->pdev->dev;
1718 int percent_rate, status = 0;
1719 u16 link_speed = 0;
1720 u8 link_status;
e1d18735 1721
11ac75ed 1722 if (!sriov_enabled(adapter))
1723 return -EPERM;
1724
94f434c2 1725 if (vf >= adapter->num_vfs)
1726 return -EINVAL;
1727
1728 if (min_tx_rate)
1729 return -EINVAL;
1730
1731 if (!max_tx_rate)
1732 goto config_qos;
1733
1734 status = be_cmd_link_status_query(adapter, &link_speed,
1735 &link_status, 0);
1736 if (status)
1737 goto err;
1738
1739 if (!link_status) {
1740 dev_err(dev, "TX-rate setting not allowed when link is down\n");
940a3fcd 1741 status = -ENETDOWN;
1742 goto err;
1743 }
1744
1745 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1746 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1747 link_speed);
1748 status = -EINVAL;
1749 goto err;
1750 }
1751
1752 /* On Skyhawk the QOS setting must be done only as a % value */
1753 percent_rate = link_speed / 100;
1754 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1755 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1756 percent_rate);
1757 status = -EINVAL;
1758 goto err;
94f434c2 1759 }
e1d18735 1760
1761config_qos:
1762 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
e1d18735 1763 if (status)
1764 goto err;
1765
1766 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1767 return 0;
1768
1769err:
1770 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1771 max_tx_rate, vf);
abccf23e 1772 return be_cmd_status(status);
e1d18735 1773}
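/* Constraints enforced above: a minimum TX rate is not supported, the
 * link must be up, and max_tx_rate must lie in [100, link_speed] Mbps.
 * On Skyhawk it must also be a whole percentage of the link speed,
 * e.g. a multiple of 100 Mbps on a 10Gbps link.
 */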
e2fb1afa 1774
1775static int be_set_vf_link_state(struct net_device *netdev, int vf,
1776 int link_state)
1777{
1778 struct be_adapter *adapter = netdev_priv(netdev);
1779 int status;
1780
1781 if (!sriov_enabled(adapter))
1782 return -EPERM;
1783
1784 if (vf >= adapter->num_vfs)
1785 return -EINVAL;
1786
1787 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1788 if (status) {
1789 dev_err(&adapter->pdev->dev,
1790 "Link state change on VF %d failed: %#x\n", vf, status);
1791 return be_cmd_status(status);
1792 }
bdce2ad7 1793
1794 adapter->vf_cfg[vf].plink_tracking = link_state;
1795
1796 return 0;
bdce2ad7 1797}
e1d18735 1798
1799static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1800{
1801 struct be_adapter *adapter = netdev_priv(netdev);
1802 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1803 u8 spoofchk;
1804 int status;
1805
1806 if (!sriov_enabled(adapter))
1807 return -EPERM;
1808
1809 if (vf >= adapter->num_vfs)
1810 return -EINVAL;
1811
1812 if (BEx_chip(adapter))
1813 return -EOPNOTSUPP;
1814
1815 if (enable == vf_cfg->spoofchk)
1816 return 0;
1817
1818 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1819
1820 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1821 0, spoofchk);
1822 if (status) {
1823 dev_err(&adapter->pdev->dev,
1824 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1825 return be_cmd_status(status);
1826 }
1827
1828 vf_cfg->spoofchk = enable;
1829 return 0;
1830}
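/* Reached via .ndo_set_vf_spoofchk, e.g. "ip link set <pf-ifname>
 * vf 0 spoofchk on". Spoof-checking is programmed through the
 * hardware-switch config and is not available on BEx chips, hence
 * the -EOPNOTSUPP above.
 */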
1831
1832static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1833 ulong now)
6b7c5b94 1834{
1835 aic->rx_pkts_prev = rx_pkts;
1836 aic->tx_reqs_prev = tx_pkts;
1837 aic->jiffies = now;
1838}
ac124ff9 1839
20947770 1840static int be_get_new_eqd(struct be_eq_obj *eqo)
2632bafd 1841{
1842 struct be_adapter *adapter = eqo->adapter;
1843 int eqd, start;
2632bafd 1844 struct be_aic_obj *aic;
1845 struct be_rx_obj *rxo;
1846 struct be_tx_obj *txo;
20947770 1847 u64 rx_pkts = 0, tx_pkts = 0;
1848 ulong now;
1849 u32 pps, delta;
20947770 1850 int i;
10ef9ab4 1851
1852 aic = &adapter->aic_obj[eqo->idx];
1853 if (!aic->enable) {
1854 if (aic->jiffies)
1855 aic->jiffies = 0;
1856 eqd = aic->et_eqd;
1857 return eqd;
1858 }
6b7c5b94 1859
20947770 1860 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2632bafd 1861 do {
57a7744e 1862 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
20947770 1863 rx_pkts += rxo->stats.rx_pkts;
57a7744e 1864 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
20947770 1865 }
10ef9ab4 1866
20947770 1867 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2632bafd 1868 do {
57a7744e 1869 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
20947770 1870 tx_pkts += txo->stats.tx_reqs;
57a7744e 1871 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
20947770 1872 }
6b7c5b94 1873
1874 /* Skip if the counters wrapped around or on the first calculation */
1875 now = jiffies;
1876 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1877 rx_pkts < aic->rx_pkts_prev ||
1878 tx_pkts < aic->tx_reqs_prev) {
1879 be_aic_update(aic, rx_pkts, tx_pkts, now);
1880 return aic->prev_eqd;
1881 }
2632bafd 1882
1883 delta = jiffies_to_msecs(now - aic->jiffies);
1884 if (delta == 0)
1885 return aic->prev_eqd;
10ef9ab4 1886
1887 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1888 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1889 eqd = (pps / 15000) << 2;
2632bafd 1890
1891 if (eqd < 8)
1892 eqd = 0;
1893 eqd = min_t(u32, eqd, aic->max_eqd);
1894 eqd = max_t(u32, eqd, aic->min_eqd);
1895
1896 be_aic_update(aic, rx_pkts, tx_pkts, now);
1897
1898 return eqd;
1899}
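/* Worked example of the heuristic above: if the queues on this EQ
 * moved 300k pkts/s (rx + tx) since the last sample, then
 * eqd = (300000 / 15000) << 2 = 80, i.e. roughly an 80us interrupt
 * delay. Rates below 30k pkts/s (eqd < 8) request no delay at all,
 * and the result is always clamped to [min_eqd, max_eqd].
 */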
1900
1901/* For Skyhawk-R only */
1902static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1903{
1904 struct be_adapter *adapter = eqo->adapter;
1905 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1906 ulong now = jiffies;
1907 int eqd;
1908 u32 mult_enc;
1909
1910 if (!aic->enable)
1911 return 0;
1912
1913 if (time_before_eq(now, aic->jiffies) ||
1914 jiffies_to_msecs(now - aic->jiffies) < 1)
1915 eqd = aic->prev_eqd;
1916 else
1917 eqd = be_get_new_eqd(eqo);
1918
1919 if (eqd > 100)
1920 mult_enc = R2I_DLY_ENC_1;
1921 else if (eqd > 60)
1922 mult_enc = R2I_DLY_ENC_2;
1923 else if (eqd > 20)
1924 mult_enc = R2I_DLY_ENC_3;
1925 else
1926 mult_enc = R2I_DLY_ENC_0;
1927
1928 aic->prev_eqd = eqd;
1929
1930 return mult_enc;
1931}
1932
1933void be_eqd_update(struct be_adapter *adapter, bool force_update)
1934{
1935 struct be_set_eqd set_eqd[MAX_EVT_QS];
1936 struct be_aic_obj *aic;
1937 struct be_eq_obj *eqo;
1938 int i, num = 0, eqd;
1939
1940 for_all_evt_queues(adapter, eqo, i) {
1941 aic = &adapter->aic_obj[eqo->idx];
1942 eqd = be_get_new_eqd(eqo);
1943 if (force_update || eqd != aic->prev_eqd) {
1944 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1945 set_eqd[num].eq_id = eqo->q.id;
1946 aic->prev_eqd = eqd;
1947 num++;
1948 }
ac124ff9 1949 }
1950
1951 if (num)
1952 be_cmd_modify_eqd(adapter, set_eqd, num);
1953}
1954
3abcdeda 1955static void be_rx_stats_update(struct be_rx_obj *rxo,
748b539a 1956 struct be_rx_compl_info *rxcp)
4097f663 1957{
ac124ff9 1958 struct be_rx_stats *stats = rx_stats(rxo);
1ef78abe 1959
ab1594e9 1960 u64_stats_update_begin(&stats->sync);
3abcdeda 1961 stats->rx_compl++;
2e588f84 1962 stats->rx_bytes += rxcp->pkt_size;
3abcdeda 1963 stats->rx_pkts++;
2e588f84 1964 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
3abcdeda 1965 stats->rx_mcast_pkts++;
2e588f84 1966 if (rxcp->err)
ac124ff9 1967 stats->rx_compl_err++;
ab1594e9 1968 u64_stats_update_end(&stats->sync);
1969}
1970
2e588f84 1971static inline bool csum_passed(struct be_rx_compl_info *rxcp)
728a9972 1972{
19fad86f 1973 /* L4 checksum is not reliable for non TCP/UDP packets.
1974 * Also ignore ipcksm for ipv6 pkts
1975 */
2e588f84 1976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
c9c47142 1977 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1978}
1979
0b0ef1d0 1980static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
6b7c5b94 1981{
10ef9ab4 1982 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1983 struct be_rx_page_info *rx_page_info;
3abcdeda 1984 struct be_queue_info *rxq = &rxo->q;
0b0ef1d0 1985 u16 frag_idx = rxq->tail;
6b7c5b94 1986
3abcdeda 1987 rx_page_info = &rxo->page_info_tbl[frag_idx];
1988 BUG_ON(!rx_page_info->page);
1989
e50287be 1990 if (rx_page_info->last_frag) {
1991 dma_unmap_page(&adapter->pdev->dev,
1992 dma_unmap_addr(rx_page_info, bus),
1993 adapter->big_page_size, DMA_FROM_DEVICE);
1994 rx_page_info->last_frag = false;
1995 } else {
1996 dma_sync_single_for_cpu(&adapter->pdev->dev,
1997 dma_unmap_addr(rx_page_info, bus),
1998 rx_frag_size, DMA_FROM_DEVICE);
205859a2 1999 }
6b7c5b94 2000
0b0ef1d0 2001 queue_tail_inc(rxq);
2002 atomic_dec(&rxq->used);
2003 return rx_page_info;
2004}
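/* The big page backing an RX frag is DMA-mapped once for the whole
 * page: only the descriptor flagged last_frag unmaps it; earlier
 * frags just sync their rx_frag_size window back to the CPU.
 */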
2005
2006/* Throw away the data in the Rx completion */
2007static void be_rx_compl_discard(struct be_rx_obj *rxo,
2008 struct be_rx_compl_info *rxcp)
6b7c5b94 2009{
6b7c5b94 2010 struct be_rx_page_info *page_info;
2e588f84 2011 u16 i, num_rcvd = rxcp->num_rcvd;
6b7c5b94 2012
e80d9da6 2013 for (i = 0; i < num_rcvd; i++) {
0b0ef1d0 2014 page_info = get_rx_page_info(rxo);
2015 put_page(page_info->page);
2016 memset(page_info, 0, sizeof(*page_info));
2017 }
2018}
2019
2020/*
2021 * skb_fill_rx_data forms a complete skb for an ether frame
2022 * indicated by rxcp.
2023 */
2024static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2025 struct be_rx_compl_info *rxcp)
6b7c5b94 2026{
6b7c5b94 2027 struct be_rx_page_info *page_info;
2028 u16 i, j;
2029 u16 hdr_len, curr_frag_len, remaining;
6b7c5b94 2030 u8 *start;
6b7c5b94 2031
0b0ef1d0 2032 page_info = get_rx_page_info(rxo);
2033 start = page_address(page_info->page) + page_info->page_offset;
2034 prefetch(start);
2035
2036 /* Copy data in the first descriptor of this completion */
2e588f84 2037 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
6b7c5b94 2038
2039 skb->len = curr_frag_len;
2040 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
ac1ae5f3 2041 memcpy(skb->data, start, curr_frag_len);
2042 /* Complete packet has now been moved to data */
2043 put_page(page_info->page);
2044 skb->data_len = 0;
2045 skb->tail += curr_frag_len;
2046 } else {
2047 hdr_len = ETH_HLEN;
2048 memcpy(skb->data, start, hdr_len);
6b7c5b94 2049 skb_shinfo(skb)->nr_frags = 1;
b061b39e 2050 skb_frag_set_page(skb, 0, page_info->page);
2051 skb_shinfo(skb)->frags[0].page_offset =
2052 page_info->page_offset + hdr_len;
2053 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2054 curr_frag_len - hdr_len);
6b7c5b94 2055 skb->data_len = curr_frag_len - hdr_len;
bdb28a97 2056 skb->truesize += rx_frag_size;
2057 skb->tail += hdr_len;
2058 }
205859a2 2059 page_info->page = NULL;
6b7c5b94 2060
2061 if (rxcp->pkt_size <= rx_frag_size) {
2062 BUG_ON(rxcp->num_rcvd != 1);
2063 return;
2064 }
2065
2066 /* More frags present for this completion */
2067 remaining = rxcp->pkt_size - curr_frag_len;
2068 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2069 page_info = get_rx_page_info(rxo);
2e588f84 2070 curr_frag_len = min(remaining, rx_frag_size);
6b7c5b94 2071
2072 /* Coalesce all frags from the same physical page in one slot */
2073 if (page_info->page_offset == 0) {
2074 /* Fresh page */
2075 j++;
b061b39e 2076 skb_frag_set_page(skb, j, page_info->page);
2077 skb_shinfo(skb)->frags[j].page_offset =
2078 page_info->page_offset;
9e903e08 2079 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2080 skb_shinfo(skb)->nr_frags++;
2081 } else {
2082 put_page(page_info->page);
2083 }
2084
9e903e08 2085 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2086 skb->len += curr_frag_len;
2087 skb->data_len += curr_frag_len;
bdb28a97 2088 skb->truesize += rx_frag_size;
2e588f84 2089 remaining -= curr_frag_len;
205859a2 2090 page_info->page = NULL;
6b7c5b94 2091 }
bd46cb6c 2092 BUG_ON(j > MAX_SKB_FRAGS);
2093}
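/* Resulting skb layout: packets up to BE_HDR_LEN are copied whole
 * into the linear area and the page ref is dropped; larger packets
 * keep only the Ethernet header linear and attach the rest as page
 * frags, coalescing frags that share a physical page into one slot.
 */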
2094
5be93b9a 2095/* Process the RX completion indicated by rxcp when GRO is disabled */
6384a4d0 2096static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
10ef9ab4 2097 struct be_rx_compl_info *rxcp)
6b7c5b94 2098{
10ef9ab4 2099 struct be_adapter *adapter = rxo->adapter;
6332c8d3 2100 struct net_device *netdev = adapter->netdev;
6b7c5b94 2101 struct sk_buff *skb;
89420424 2102
bb349bb4 2103 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
a058a632 2104 if (unlikely(!skb)) {
ac124ff9 2105 rx_stats(rxo)->rx_drops_no_skbs++;
10ef9ab4 2106 be_rx_compl_discard(rxo, rxcp);
2107 return;
2108 }
2109
10ef9ab4 2110 skb_fill_rx_data(rxo, skb, rxcp);
6b7c5b94 2111
6332c8d3 2112 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
728a9972 2113 skb->ip_summed = CHECKSUM_UNNECESSARY;
2114 else
2115 skb_checksum_none_assert(skb);
6b7c5b94 2116
6332c8d3 2117 skb->protocol = eth_type_trans(skb, netdev);
aaa6daec 2118 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
10ef9ab4 2119 if (netdev->features & NETIF_F_RXHASH)
d2464c8c 2120 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2121
b6c0e89d 2122 skb->csum_level = rxcp->tunneled;
6384a4d0 2123 skb_mark_napi_id(skb, napi);
6b7c5b94 2124
343e43c0 2125 if (rxcp->vlanf)
86a9bad3 2126 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2127
2128 netif_receive_skb(skb);
2129}
2130
5be93b9a 2131/* Process the RX completion indicated by rxcp when GRO is enabled */
2132static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2133 struct napi_struct *napi,
2134 struct be_rx_compl_info *rxcp)
6b7c5b94 2135{
10ef9ab4 2136 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2137 struct be_rx_page_info *page_info;
5be93b9a 2138 struct sk_buff *skb = NULL;
2139 u16 remaining, curr_frag_len;
2140 u16 i, j;
3968fa1e 2141
10ef9ab4 2142 skb = napi_get_frags(napi);
5be93b9a 2143 if (!skb) {
10ef9ab4 2144 be_rx_compl_discard(rxo, rxcp);
2145 return;
2146 }
2147
2148 remaining = rxcp->pkt_size;
2149 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
0b0ef1d0 2150 page_info = get_rx_page_info(rxo);
2151
2152 curr_frag_len = min(remaining, rx_frag_size);
2153
2154 /* Coalesce all frags from the same physical page in one slot */
2155 if (i == 0 || page_info->page_offset == 0) {
2156 /* First frag or Fresh page */
2157 j++;
b061b39e 2158 skb_frag_set_page(skb, j, page_info->page);
2159 skb_shinfo(skb)->frags[j].page_offset =
2160 page_info->page_offset;
9e903e08 2161 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2162 } else {
2163 put_page(page_info->page);
2164 }
9e903e08 2165 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
bdb28a97 2166 skb->truesize += rx_frag_size;
bd46cb6c 2167 remaining -= curr_frag_len;
2168 memset(page_info, 0, sizeof(*page_info));
2169 }
bd46cb6c 2170 BUG_ON(j > MAX_SKB_FRAGS);
6b7c5b94 2171
5be93b9a 2172 skb_shinfo(skb)->nr_frags = j + 1;
2173 skb->len = rxcp->pkt_size;
2174 skb->data_len = rxcp->pkt_size;
5be93b9a 2175 skb->ip_summed = CHECKSUM_UNNECESSARY;
aaa6daec 2176 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
4b972914 2177 if (adapter->netdev->features & NETIF_F_RXHASH)
d2464c8c 2178 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
c9c47142 2179
b6c0e89d 2180 skb->csum_level = rxcp->tunneled;
6384a4d0 2181 skb_mark_napi_id(skb, napi);
5be93b9a 2182
343e43c0 2183 if (rxcp->vlanf)
86a9bad3 2184 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
4c5102f9 2185
10ef9ab4 2186 napi_gro_frags(napi);
2187}
2188
2189static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2190 struct be_rx_compl_info *rxcp)
2e588f84 2191{
2192 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2193 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2194 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2195 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2196 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2197 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2198 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2199 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2200 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2201 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2202 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
15d72184 2203 if (rxcp->vlanf) {
2204 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2205 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
15d72184 2206 }
c3c18bc1 2207 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
c9c47142 2208 rxcp->tunneled =
c3c18bc1 2209 GET_RX_COMPL_V1_BITS(tunneled, compl);
2210}
2211
2212static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2213 struct be_rx_compl_info *rxcp)
2e588f84 2214{
2215 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2216 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2217 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2218 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2219 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2220 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2221 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2222 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2223 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2224 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2225 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
15d72184 2226 if (rxcp->vlanf) {
2227 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2228 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
15d72184 2229 }
2230 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2231 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2232}
2233
2234static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2235{
2236 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2237 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2238 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 2239
2240 /* For checking the valid bit it is OK to use either definition, as the
2241 * valid bit is at the same position in both v0 and v1 Rx compl */
2242 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2243 return NULL;
6b7c5b94 2244
2245 rmb();
2246 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2247
2e588f84 2248 if (adapter->be3_native)
10ef9ab4 2249 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 2250 else
10ef9ab4 2251 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 2252
2253 if (rxcp->ip_frag)
2254 rxcp->l4_csum = 0;
2255
15d72184 2256 if (rxcp->vlanf) {
2257 /* In QNQ modes, if qnq bit is not set, then the packet was
2258 * tagged only with the transparent outer vlan-tag and must
2259 * not be treated as a vlan packet by host
2260 */
2261 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
15d72184 2262 rxcp->vlanf = 0;
6b7c5b94 2263
15d72184 2264 if (!lancer_chip(adapter))
3c709f8f 2265 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 2266
939cf306 2267 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
f6cbd364 2268 !test_bit(rxcp->vlan_tag, adapter->vids))
2269 rxcp->vlanf = 0;
2270 }
2271
2272 /* As the compl has been parsed, reset it; we won't touch it again */
2273 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 2274
3abcdeda 2275 queue_tail_inc(&rxo->cq);
2276 return rxcp;
2277}
2278
1829b086 2279static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 2280{
6b7c5b94 2281 u32 order = get_order(size);
1829b086 2282
6b7c5b94 2283 if (order > 0)
2284 gfp |= __GFP_COMP;
2285 return alloc_pages(gfp, order);
2286}
2287
2288/*
2289 * Allocate a page, split it to fragments of size rx_frag_size and post as
2290 * receive buffers to BE
2291 */
c30d7266 2292static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
6b7c5b94 2293{
3abcdeda 2294 struct be_adapter *adapter = rxo->adapter;
26d92f92 2295 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 2296 struct be_queue_info *rxq = &rxo->q;
6b7c5b94 2297 struct page *pagep = NULL;
ba42fad0 2298 struct device *dev = &adapter->pdev->dev;
2299 struct be_eth_rx_d *rxd;
2300 u64 page_dmaaddr = 0, frag_dmaaddr;
c30d7266 2301 u32 posted, page_offset = 0, notify = 0;
6b7c5b94 2302
3abcdeda 2303 page_info = &rxo->page_info_tbl[rxq->head];
c30d7266 2304 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
6b7c5b94 2305 if (!pagep) {
1829b086 2306 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 2307 if (unlikely(!pagep)) {
ac124ff9 2308 rx_stats(rxo)->rx_post_fail++;
2309 break;
2310 }
2311 page_dmaaddr = dma_map_page(dev, pagep, 0,
2312 adapter->big_page_size,
2b7bcebf 2313 DMA_FROM_DEVICE);
2314 if (dma_mapping_error(dev, page_dmaaddr)) {
2315 put_page(pagep);
2316 pagep = NULL;
d3de1540 2317 adapter->drv_stats.dma_map_errors++;
2318 break;
2319 }
e50287be 2320 page_offset = 0;
2321 } else {
2322 get_page(pagep);
e50287be 2323 page_offset += rx_frag_size;
6b7c5b94 2324 }
e50287be 2325 page_info->page_offset = page_offset;
6b7c5b94 2326 page_info->page = pagep;
2327
2328 rxd = queue_head_node(rxq);
e50287be 2329 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2330 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2331 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2332
2333 /* Any space left in the current big page for another frag? */
2334 if ((page_offset + rx_frag_size + rx_frag_size) >
2335 adapter->big_page_size) {
2336 pagep = NULL;
2337 page_info->last_frag = true;
2338 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2339 } else {
2340 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
6b7c5b94 2341 }
2342
2343 prev_page_info = page_info;
2344 queue_head_inc(rxq);
10ef9ab4 2345 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94 2346 }
2347
2348 /* Mark the last frag of a page when we break out of the above loop
2349 * with no more slots available in the RXQ
2350 */
2351 if (pagep) {
2352 prev_page_info->last_frag = true;
2353 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2354 }
2355
2356 if (posted) {
6b7c5b94 2357 atomic_add(posted, &rxq->used);
2358 if (rxo->rx_post_starved)
2359 rxo->rx_post_starved = false;
c30d7266 2360 do {
69304cc9 2361 notify = min(MAX_NUM_POST_ERX_DB, posted);
2362 be_rxq_notify(adapter, rxq->id, notify);
2363 posted -= notify;
2364 } while (posted);
2365 } else if (atomic_read(&rxq->used) == 0) {
2366 /* Let be_worker replenish when memory is available */
3abcdeda 2367 rxo->rx_post_starved = true;
6b7c5b94 2368 }
2369}
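/* For illustration: with the default rx_frag_size of 2048 and 4K
 * pages, big_page_size works out to 4096, so each page is split into
 * two frags and every second descriptor starts a fresh page and DMA
 * mapping. The RXQ doorbell is rung in chunks of at most
 * MAX_NUM_POST_ERX_DB frags.
 */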
2370
152ffe5b 2371static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
6b7c5b94 2372{
2373 struct be_queue_info *tx_cq = &txo->cq;
2374 struct be_tx_compl_info *txcp = &txo->txcp;
2375 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
6b7c5b94 2376
152ffe5b 2377 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2378 return NULL;
2379
152ffe5b 2380 /* Ensure load ordering of valid bit dword and other dwords below */
f3eb62d2 2381 rmb();
152ffe5b 2382 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 2383
2384 txcp->status = GET_TX_COMPL_BITS(status, compl);
2385 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
6b7c5b94 2386
152ffe5b 2387 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2388 queue_tail_inc(tx_cq);
2389 return txcp;
2390}
2391
3c8def97 2392static u16 be_tx_compl_process(struct be_adapter *adapter,
748b539a 2393 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 2394{
5f07b3c5 2395 struct sk_buff **sent_skbs = txo->sent_skb_list;
3c8def97 2396 struct be_queue_info *txq = &txo->q;
2397 u16 frag_index, num_wrbs = 0;
2398 struct sk_buff *skb = NULL;
2399 bool unmap_skb_hdr = false;
a73b796e 2400 struct be_eth_wrb *wrb;
6b7c5b94 2401
ec43b1a6 2402 do {
2403 if (sent_skbs[txq->tail]) {
2404 /* Free skb from prev req */
2405 if (skb)
2406 dev_consume_skb_any(skb);
2407 skb = sent_skbs[txq->tail];
2408 sent_skbs[txq->tail] = NULL;
2409 queue_tail_inc(txq); /* skip hdr wrb */
2410 num_wrbs++;
2411 unmap_skb_hdr = true;
2412 }
a73b796e 2413 wrb = queue_tail_node(txq);
5f07b3c5 2414 frag_index = txq->tail;
2b7bcebf 2415 unmap_tx_frag(&adapter->pdev->dev, wrb,
5f07b3c5 2416 (unmap_skb_hdr && skb_headlen(skb)));
ec43b1a6 2417 unmap_skb_hdr = false;
6b7c5b94 2418 queue_tail_inc(txq);
2419 num_wrbs++;
2420 } while (frag_index != last_index);
2421 dev_consume_skb_any(skb);
6b7c5b94 2422
4d586b82 2423 return num_wrbs;
2424}
2425
2426/* Return the number of events in the event queue */
2427static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 2428{
2429 struct be_eq_entry *eqe;
2430 int num = 0;
859b1e4e 2431
2432 do {
2433 eqe = queue_tail_node(&eqo->q);
2434 if (eqe->evt == 0)
2435 break;
859b1e4e 2436
2437 rmb();
2438 eqe->evt = 0;
2439 num++;
2440 queue_tail_inc(&eqo->q);
2441 } while (true);
2442
2443 return num;
2444}
2445
2446/* Leaves the EQ in a disarmed state */
2447static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 2448{
10ef9ab4 2449 int num = events_get(eqo);
859b1e4e 2450
20947770 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2452}
2453
2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
6b7c5b94 2456{
3abcdeda 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
3abcdeda 2472 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2473 struct be_rx_compl_info *rxcp;
2474 struct be_adapter *adapter = rxo->adapter;
2475 int flush_wait = 0;
6b7c5b94 2476
2477 /* Consume pending rx completions.
2478 * Wait for the flush completion (identified by zero num_rcvd)
2479 * to arrive. Notify CQ even when there are no more CQ entries
2480 * for HW to flush partially coalesced CQ entries.
2481 * In Lancer, there is no need to wait for flush compl.
2482 */
2483 for (;;) {
2484 rxcp = be_rx_compl_get(rxo);
ddf1169f 2485 if (!rxcp) {
2486 if (lancer_chip(adapter))
2487 break;
2488
2489 if (flush_wait++ > 50 ||
2490 be_check_error(adapter,
2491 BE_ERROR_HW)) {
2492 dev_warn(&adapter->pdev->dev,
2493 "did not receive flush compl\n");
2494 break;
2495 }
2496 be_cq_notify(adapter, rx_cq->id, true, 0);
2497 mdelay(1);
2498 } else {
2499 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 2500 be_cq_notify(adapter, rx_cq->id, false, 1);
2501 if (rxcp->num_rcvd == 0)
2502 break;
2503 }
2504 }
2505
2506 /* After cleanup, leave the CQ in unarmed state */
2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2508}
2509
0ae57bb3 2510static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 2511{
2512 u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2513 struct device *dev = &adapter->pdev->dev;
152ffe5b 2514 struct be_tx_compl_info *txcp;
0ae57bb3 2515 struct be_queue_info *txq;
152ffe5b 2516 struct be_tx_obj *txo;
0ae57bb3 2517 int i, pending_txqs;
a8e9179a 2518
1a3d0717 2519 /* Stop polling for compls when HW has been silent for 10ms */
a8e9179a 2520 do {
2521 pending_txqs = adapter->num_tx_qs;
2522
2523 for_all_tx_queues(adapter, txo, i) {
2524 cmpl = 0;
2525 num_wrbs = 0;
0ae57bb3 2526 txq = &txo->q;
2527 while ((txcp = be_tx_compl_get(txo))) {
2528 num_wrbs +=
2529 be_tx_compl_process(adapter, txo,
2530 txcp->end_index);
2531 cmpl++;
2532 }
2533 if (cmpl) {
2534 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2535 atomic_sub(num_wrbs, &txq->used);
1a3d0717 2536 timeo = 0;
0ae57bb3 2537 }
cf5671e6 2538 if (!be_is_tx_compl_pending(txo))
0ae57bb3 2539 pending_txqs--;
2540 }
2541
2542 if (pending_txqs == 0 || ++timeo > 10 ||
2543 be_check_error(adapter, BE_ERROR_HW))
2544 break;
2545
2546 mdelay(1);
2547 } while (true);
2548
5f07b3c5 2549 /* Free enqueued TX that was never notified to HW */
2550 for_all_tx_queues(adapter, txo, i) {
2551 txq = &txo->q;
0ae57bb3 2552
2553 if (atomic_read(&txq->used)) {
2554 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2555 i, atomic_read(&txq->used));
2556 notified_idx = txq->tail;
0ae57bb3 2557 end_idx = txq->tail;
2558 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2559 txq->len);
2560 /* Use the tx-compl process logic to handle requests
2561 * that were not sent to the HW.
2562 */
2563 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2564 atomic_sub(num_wrbs, &txq->used);
2565 BUG_ON(atomic_read(&txq->used));
2566 txo->pend_wrb_cnt = 0;
2567 /* Since hw was never notified of these requests,
2568 * reset TXQ indices
2569 */
2570 txq->head = notified_idx;
2571 txq->tail = notified_idx;
0ae57bb3 2572 }
b03388d6 2573 }
2574}
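/* Two-phase cleanup above: first poll each TX CQ until the HW has
 * been silent for ~10ms (or a HW error is flagged), then reclaim any
 * wrbs that were enqueued but never notified to the HW and rewind
 * the queue indices so the rings stay consistent.
 */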
2575
2576static void be_evt_queues_destroy(struct be_adapter *adapter)
2577{
2578 struct be_eq_obj *eqo;
2579 int i;
2580
2581 for_all_evt_queues(adapter, eqo, i) {
2582 if (eqo->q.created) {
2583 be_eq_clean(eqo);
10ef9ab4 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
6384a4d0 2585 napi_hash_del(&eqo->napi);
68d7bdcb 2586 netif_napi_del(&eqo->napi);
649886a3 2587 free_cpumask_var(eqo->affinity_mask);
19d59aa7 2588 }
2589 be_queue_free(adapter, &eqo->q);
2590 }
2591}
2592
2593static int be_evt_queues_create(struct be_adapter *adapter)
2594{
2595 struct be_queue_info *eq;
2596 struct be_eq_obj *eqo;
2632bafd 2597 struct be_aic_obj *aic;
2598 int i, rc;
2599
2600 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2601 adapter->cfg_num_qs);
2602
2603 for_all_evt_queues(adapter, eqo, i) {
f36963c9 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
649886a3 2605
2632bafd 2606 aic = &adapter->aic_obj[i];
10ef9ab4 2607 eqo->adapter = adapter;
10ef9ab4 2608 eqo->idx = i;
2609 aic->max_eqd = BE_MAX_EQD;
2610 aic->enable = true;
2611
2612 eq = &eqo->q;
2613 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
748b539a 2614 sizeof(struct be_eq_entry));
2615 if (rc)
2616 return rc;
2617
f2f781a7 2618 rc = be_cmd_eq_create(adapter, eqo);
2619 if (rc)
2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
10ef9ab4 2629 }
1cfafab9 2630 return 0;
2631}
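/* Each EQ gets a NAPI context plus an affinity mask spread across the
 * device's NUMA-local CPUs via cpumask_local_spread(); the mask is
 * applied to the corresponding MSI-X vector as a hint later, in
 * be_msix_register().
 */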
2632
2633static void be_mcc_queues_destroy(struct be_adapter *adapter)
2634{
2635 struct be_queue_info *q;
5fb379ee 2636
8788fdc2 2637 q = &adapter->mcc_obj.q;
5fb379ee 2638 if (q->created)
8788fdc2 2639 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2640 be_queue_free(adapter, q);
2641
8788fdc2 2642 q = &adapter->mcc_obj.cq;
5fb379ee 2643 if (q->created)
8788fdc2 2644 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2645 be_queue_free(adapter, q);
2646}
2647
2648/* Must be called only after TX qs are created as MCC shares TX EQ */
2649static int be_mcc_queues_create(struct be_adapter *adapter)
2650{
2651 struct be_queue_info *q, *cq;
5fb379ee 2652
8788fdc2 2653 cq = &adapter->mcc_obj.cq;
5fb379ee 2654 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
748b539a 2655 sizeof(struct be_mcc_compl)))
2656 goto err;
2657
2658 /* Use the default EQ for MCC completions */
2659 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2660 goto mcc_cq_free;
2661
8788fdc2 2662 q = &adapter->mcc_obj.q;
2663 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2664 goto mcc_cq_destroy;
2665
8788fdc2 2666 if (be_cmd_mccq_create(adapter, q, cq))
2667 goto mcc_q_free;
2668
2669 return 0;
2670
2671mcc_q_free:
2672 be_queue_free(adapter, q);
2673mcc_cq_destroy:
8788fdc2 2674 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2675mcc_cq_free:
2676 be_queue_free(adapter, cq);
2677err:
2678 return -1;
2679}
2680
2681static void be_tx_queues_destroy(struct be_adapter *adapter)
2682{
2683 struct be_queue_info *q;
2684 struct be_tx_obj *txo;
2685 u8 i;
6b7c5b94 2686
2687 for_all_tx_queues(adapter, txo, i) {
2688 q = &txo->q;
2689 if (q->created)
2690 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2691 be_queue_free(adapter, q);
6b7c5b94 2692
2693 q = &txo->cq;
2694 if (q->created)
2695 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2696 be_queue_free(adapter, q);
2697 }
2698}
2699
7707133c 2700static int be_tx_qs_create(struct be_adapter *adapter)
6b7c5b94 2701{
73f394e6 2702 struct be_queue_info *cq;
3c8def97 2703 struct be_tx_obj *txo;
73f394e6 2704 struct be_eq_obj *eqo;
92bf14ab 2705 int status, i;
6b7c5b94 2706
92bf14ab 2707 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
dafc0fe3 2708
2709 for_all_tx_queues(adapter, txo, i) {
2710 cq = &txo->cq;
2711 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2712 sizeof(struct be_eth_tx_compl));
2713 if (status)
2714 return status;
3c8def97 2715
2716 u64_stats_init(&txo->stats.sync);
2717 u64_stats_init(&txo->stats.sync_compl);
2718
2719 /* If num_evt_qs is less than num_tx_qs, then more than
2720 * one txq shares an eq
2721 */
2722 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2723 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
2724 if (status)
2725 return status;
6b7c5b94 2726
2727 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2728 sizeof(struct be_eth_wrb));
2729 if (status)
2730 return status;
6b7c5b94 2731
94d73aaa 2732 status = be_cmd_txq_create(adapter, txo);
2733 if (status)
2734 return status;
2735
2736 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2737 eqo->idx);
3c8def97 2738 }
6b7c5b94 2739
2740 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2741 adapter->num_tx_qs);
10ef9ab4 2742 return 0;
2743}
2744
10ef9ab4 2745static void be_rx_cqs_destroy(struct be_adapter *adapter)
2746{
2747 struct be_queue_info *q;
2748 struct be_rx_obj *rxo;
2749 int i;
2750
2751 for_all_rx_queues(adapter, rxo, i) {
2752 q = &rxo->cq;
2753 if (q->created)
2754 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2755 be_queue_free(adapter, q);
2756 }
2757}
2758
10ef9ab4 2759static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2760{
10ef9ab4 2761 struct be_queue_info *eq, *cq;
2762 struct be_rx_obj *rxo;
2763 int rc, i;
6b7c5b94 2764
92bf14ab 2765 /* We can create as many RSS rings as there are EQs. */
71bb8bd0 2766 adapter->num_rss_qs = adapter->num_evt_qs;
92bf14ab 2767
2768 /* We'll use RSS only if at least 2 RSS rings are supported. */
2769 if (adapter->num_rss_qs <= 1)
2770 adapter->num_rss_qs = 0;
2771
2772 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2773
2774 /* When the interface is not capable of RSS rings (and there is no
2775 * need to create a default RXQ) we'll still need one RXQ
10ef9ab4 2776 */
2777 if (adapter->num_rx_qs == 0)
2778 adapter->num_rx_qs = 1;
92bf14ab 2779
6b7c5b94 2780 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2781 for_all_rx_queues(adapter, rxo, i) {
2782 rxo->adapter = adapter;
2783 cq = &rxo->cq;
2784 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
748b539a 2785 sizeof(struct be_eth_rx_compl));
3abcdeda 2786 if (rc)
10ef9ab4 2787 return rc;
3abcdeda 2788
827da44c 2789 u64_stats_init(&rxo->stats.sync);
2790 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2791 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2792 if (rc)
10ef9ab4 2793 return rc;
3abcdeda 2794 }
6b7c5b94 2795
d379142b 2796 dev_info(&adapter->pdev->dev,
71bb8bd0 2797 "created %d RX queue(s)\n", adapter->num_rx_qs);
10ef9ab4 2798 return 0;
2799}
2800
2801static irqreturn_t be_intx(int irq, void *dev)
2802{
2803 struct be_eq_obj *eqo = dev;
2804 struct be_adapter *adapter = eqo->adapter;
2805 int num_evts = 0;
6b7c5b94 2806
2807 /* IRQ is not expected when NAPI is scheduled as the EQ
2808 * will not be armed.
2809 * But, this can happen on Lancer INTx where it takes
2810 * a while to de-assert INTx or in BE2 where occasionally
2811 * an interrupt may be raised even when EQ is unarmed.
2812 * If NAPI is already scheduled, then counting & notifying
2813 * events will orphan them.
e49cc34f 2814 */
d0b9cec3 2815 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2816 num_evts = events_get(eqo);
2817 __napi_schedule(&eqo->napi);
2818 if (num_evts)
2819 eqo->spurious_intr = 0;
2820 }
20947770 2821 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
e49cc34f 2822
2823 /* Return IRQ_HANDLED only for the first spurious intr
2824 * after a valid intr to stop the kernel from branding
2825 * this irq as a bad one!
e49cc34f 2826 */
2827 if (num_evts || eqo->spurious_intr++ == 0)
2828 return IRQ_HANDLED;
2829 else
2830 return IRQ_NONE;
2831}
2832
10ef9ab4 2833static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2834{
10ef9ab4 2835 struct be_eq_obj *eqo = dev;
6b7c5b94 2836
20947770 2837 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
0b545a62 2838 napi_schedule(&eqo->napi);
2839 return IRQ_HANDLED;
2840}
2841
2e588f84 2842static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2843{
e38b1706 2844 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2845}
2846
10ef9ab4 2847static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
748b539a 2848 int budget, int polling)
6b7c5b94 2849{
2850 struct be_adapter *adapter = rxo->adapter;
2851 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2852 struct be_rx_compl_info *rxcp;
6b7c5b94 2853 u32 work_done;
c30d7266 2854 u32 frags_consumed = 0;
2855
2856 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2857 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2858 if (!rxcp)
2859 break;
2860
2861 /* Is it a flush compl that has no data */
2862 if (unlikely(rxcp->num_rcvd == 0))
2863 goto loop_continue;
2864
2865 /* Discard compl with partial DMA Lancer B0 */
2866 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2867 be_rx_compl_discard(rxo, rxcp);
2868 goto loop_continue;
2869 }
2870
2871 /* On BE drop pkts that arrive due to imperfect filtering in
2872 * promiscuous mode on some SKUs
2873 */
2874 if (unlikely(rxcp->port != adapter->port_num &&
748b539a 2875 !lancer_chip(adapter))) {
10ef9ab4 2876 be_rx_compl_discard(rxo, rxcp);
12004ae9 2877 goto loop_continue;
64642811 2878 }
009dd872 2879
2880 /* Don't do gro when we're busy_polling */
2881 if (do_gro(rxcp) && polling != BUSY_POLLING)
10ef9ab4 2882 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2883 else
2884 be_rx_compl_process(rxo, napi, rxcp);
2885
12004ae9 2886loop_continue:
c30d7266 2887 frags_consumed += rxcp->num_rcvd;
2e588f84 2888 be_rx_stats_update(rxo, rxcp);
2889 }
2890
2891 if (work_done) {
2892 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2893
2894 /* When an rx-obj gets into post_starved state, just
2895 * let be_worker do the posting.
2896 */
2897 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2898 !rxo->rx_post_starved)
2899 be_post_rx_frags(rxo, GFP_ATOMIC,
2900 max_t(u32, MAX_RX_POST,
2901 frags_consumed));
6b7c5b94 2902 }
10ef9ab4 2903
2904 return work_done;
2905}
2906
152ffe5b 2907static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2908{
2909 switch (status) {
2910 case BE_TX_COMP_HDR_PARSE_ERR:
2911 tx_stats(txo)->tx_hdr_parse_err++;
2912 break;
2913 case BE_TX_COMP_NDMA_ERR:
2914 tx_stats(txo)->tx_dma_err++;
2915 break;
2916 case BE_TX_COMP_ACL_ERR:
2917 tx_stats(txo)->tx_spoof_check_err++;
2918 break;
2919 }
2920}
2921
152ffe5b 2922static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2923{
2924 switch (status) {
2925 case LANCER_TX_COMP_LSO_ERR:
2926 tx_stats(txo)->tx_tso_err++;
2927 break;
2928 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2929 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2930 tx_stats(txo)->tx_spoof_check_err++;
2931 break;
2932 case LANCER_TX_COMP_QINQ_ERR:
2933 tx_stats(txo)->tx_qinq_err++;
2934 break;
2935 case LANCER_TX_COMP_PARITY_ERR:
2936 tx_stats(txo)->tx_internal_parity_err++;
2937 break;
2938 case LANCER_TX_COMP_DMA_ERR:
2939 tx_stats(txo)->tx_dma_err++;
2940 break;
2941 }
2942}
2943
2944static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2945 int idx)
6b7c5b94 2946{
c8f64615 2947 int num_wrbs = 0, work_done = 0;
152ffe5b 2948 struct be_tx_compl_info *txcp;
c8f64615 2949
2950 while ((txcp = be_tx_compl_get(txo))) {
2951 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
c8f64615 2952 work_done++;
3c8def97 2953
152ffe5b 2954 if (txcp->status) {
512bb8a2 2955 if (lancer_chip(adapter))
152ffe5b 2956 lancer_update_tx_err(txo, txcp->status);
512bb8a2 2957 else
152ffe5b 2958 be_update_tx_err(txo, txcp->status);
512bb8a2 2959 }
10ef9ab4 2960 }
6b7c5b94 2961
2962 if (work_done) {
2963 be_cq_notify(adapter, txo->cq.id, true, work_done);
2964 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2965
2966 /* As Tx wrbs have been freed up, wake up netdev queue
2967 * if it was stopped due to lack of tx wrbs. */
2968 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
cf5671e6 2969 be_can_txq_wake(txo)) {
10ef9ab4 2970 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2971 }
2972
2973 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2974 tx_stats(txo)->tx_compl += work_done;
2975 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2976 }
10ef9ab4 2977}
6b7c5b94 2978
2979#ifdef CONFIG_NET_RX_BUSY_POLL
2980static inline bool be_lock_napi(struct be_eq_obj *eqo)
2981{
2982 bool status = true;
2983
2984 spin_lock(&eqo->lock); /* BH is already disabled */
2985 if (eqo->state & BE_EQ_LOCKED) {
2986 WARN_ON(eqo->state & BE_EQ_NAPI);
2987 eqo->state |= BE_EQ_NAPI_YIELD;
2988 status = false;
2989 } else {
2990 eqo->state = BE_EQ_NAPI;
2991 }
2992 spin_unlock(&eqo->lock);
2993 return status;
2994}
2995
2996static inline void be_unlock_napi(struct be_eq_obj *eqo)
2997{
2998 spin_lock(&eqo->lock); /* BH is already disabled */
2999
3000 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3001 eqo->state = BE_EQ_IDLE;
3002
3003 spin_unlock(&eqo->lock);
3004}
3005
3006static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3007{
3008 bool status = true;
3009
3010 spin_lock_bh(&eqo->lock);
3011 if (eqo->state & BE_EQ_LOCKED) {
3012 eqo->state |= BE_EQ_POLL_YIELD;
3013 status = false;
3014 } else {
3015 eqo->state |= BE_EQ_POLL;
3016 }
3017 spin_unlock_bh(&eqo->lock);
3018 return status;
3019}
3020
3021static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3022{
3023 spin_lock_bh(&eqo->lock);
3024
3025 WARN_ON(eqo->state & (BE_EQ_NAPI));
3026 eqo->state = BE_EQ_IDLE;
3027
3028 spin_unlock_bh(&eqo->lock);
3029}
3030
3031static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3032{
3033 spin_lock_init(&eqo->lock);
3034 eqo->state = BE_EQ_IDLE;
3035}
3036
3037static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3038{
3039 local_bh_disable();
3040
3041 /* It's enough to just acquire napi lock on the eqo to stop
3042 * be_busy_poll() from processing any queues.
3043 */
3044 while (!be_lock_napi(eqo))
3045 mdelay(1);
3046
3047 local_bh_enable();
3048}
3049
3050#else /* CONFIG_NET_RX_BUSY_POLL */
3051
3052static inline bool be_lock_napi(struct be_eq_obj *eqo)
3053{
3054 return true;
3055}
3056
3057static inline void be_unlock_napi(struct be_eq_obj *eqo)
3058{
3059}
3060
3061static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3062{
3063 return false;
3064}
3065
3066static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3067{
3068}
3069
3070static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3071{
3072}
3073
3074static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3075{
3076}
3077#endif /* CONFIG_NET_RX_BUSY_POLL */
3078
68d7bdcb 3079int be_poll(struct napi_struct *napi, int budget)
3080{
3081 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3082 struct be_adapter *adapter = eqo->adapter;
0b545a62 3083 int max_work = 0, work, i, num_evts;
6384a4d0 3084 struct be_rx_obj *rxo;
a4906ea0 3085 struct be_tx_obj *txo;
20947770 3086 u32 mult_enc = 0;
f31e50a8 3087
3088 num_evts = events_get(eqo);
3089
3090 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3091 be_process_tx(adapter, txo, i);
f31e50a8 3092
3093 if (be_lock_napi(eqo)) {
3094 /* This loop will iterate twice for EQ0 in which
3095 * completions of the last RXQ (default one) are also processed
3096 * For other EQs the loop iterates only once
3097 */
3098 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3099 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3100 max_work = max(work, max_work);
3101 }
3102 be_unlock_napi(eqo);
3103 } else {
3104 max_work = budget;
10ef9ab4 3105 }
6b7c5b94 3106
3107 if (is_mcc_eqo(eqo))
3108 be_process_mcc(adapter);
93c86700 3109
3110 if (max_work < budget) {
3111 napi_complete(napi);
3112
3113 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3114 * delay via a delay multiplier encoding value
3115 */
3116 if (skyhawk_chip(adapter))
3117 mult_enc = be_get_eq_delay_mult_enc(eqo);
3118
3119 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3120 mult_enc);
3121 } else {
3122 /* As we'll continue in polling mode, count and clear events */
20947770 3123 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
93c86700 3124 }
10ef9ab4 3125 return max_work;
3126}
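/* Standard NAPI contract: when less than the budget was consumed,
 * the poll completes and the EQ is re-armed (on Skyhawk with a delay
 * multiplier encoding); otherwise events are only counted and
 * cleared so polling continues without re-enabling the interrupt.
 */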
3127
3128#ifdef CONFIG_NET_RX_BUSY_POLL
3129static int be_busy_poll(struct napi_struct *napi)
3130{
3131 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3132 struct be_adapter *adapter = eqo->adapter;
3133 struct be_rx_obj *rxo;
3134 int i, work = 0;
3135
3136 if (!be_lock_busy_poll(eqo))
3137 return LL_FLUSH_BUSY;
3138
3139 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3140 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3141 if (work)
3142 break;
3143 }
3144
3145 be_unlock_busy_poll(eqo);
3146 return work;
3147}
3148#endif
3149
f67ef7ba 3150void be_detect_error(struct be_adapter *adapter)
7c185276 3151{
3152 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3153 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276 3154 u32 i;
eb0eecc1 3155 struct device *dev = &adapter->pdev->dev;
7c185276 3156
954f6825 3157 if (be_check_error(adapter, BE_ERROR_HW))
3158 return;
3159
3160 if (lancer_chip(adapter)) {
3161 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3162 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
954f6825 3163 be_set_error(adapter, BE_ERROR_UE);
e1cfb67a 3164 sliport_err1 = ioread32(adapter->db +
748b539a 3165 SLIPORT_ERROR1_OFFSET);
e1cfb67a 3166 sliport_err2 = ioread32(adapter->db +
748b539a 3167 SLIPORT_ERROR2_OFFSET);
3168 /* Do not log error messages if it's a FW reset */
3169 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3170 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3171 dev_info(dev, "Firmware update in progress\n");
3172 } else {
3173 dev_err(dev, "Error detected in the card\n");
3174 dev_err(dev, "ERR: sliport status 0x%x\n",
3175 sliport_status);
3176 dev_err(dev, "ERR: sliport error1 0x%x\n",
3177 sliport_err1);
3178 dev_err(dev, "ERR: sliport error2 0x%x\n",
3179 sliport_err2);
3180 }
3181 }
3182 } else {
3183 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3184 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3185 ue_lo_mask = ioread32(adapter->pcicfg +
3186 PCICFG_UE_STATUS_LOW_MASK);
3187 ue_hi_mask = ioread32(adapter->pcicfg +
3188 PCICFG_UE_STATUS_HI_MASK);
e1cfb67a 3189
3190 ue_lo = (ue_lo & ~ue_lo_mask);
3191 ue_hi = (ue_hi & ~ue_hi_mask);
7c185276 3192
3193 /* On certain platforms BE hardware can indicate spurious UEs.
3194 * Allow HW to stop working completely in case of a real UE.
3195 * Hence not setting the hw_error for UE detection.
3196 */
f67ef7ba 3197
eb0eecc1 3198 if (ue_lo || ue_hi) {
3199 dev_err(dev,
3200 "Unrecoverable Error detected in the adapter");
3201 dev_err(dev, "Please reboot server to recover");
3202 if (skyhawk_chip(adapter))
3203 be_set_error(adapter, BE_ERROR_UE);
3204
3205 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3206 if (ue_lo & 1)
3207 dev_err(dev, "UE: %s bit set\n",
3208 ue_status_low_desc[i]);
3209 }
3210 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3211 if (ue_hi & 1)
3212 dev_err(dev, "UE: %s bit set\n",
3213 ue_status_hi_desc[i]);
3214 }
3215 }
3216 }
3217}
3218
3219static void be_msix_disable(struct be_adapter *adapter)
3220{
ac6a0c4a 3221 if (msix_enabled(adapter)) {
8d56ff11 3222 pci_disable_msix(adapter->pdev);
ac6a0c4a 3223 adapter->num_msix_vec = 0;
68d7bdcb 3224 adapter->num_msix_roce_vec = 0;
3225 }
3226}
3227
c2bba3df 3228static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 3229{
7dc4c064 3230 int i, num_vec;
d379142b 3231 struct device *dev = &adapter->pdev->dev;
6b7c5b94 3232
3233 /* If RoCE is supported, program the max number of NIC vectors that
3234 * may be configured via set-channels, along with vectors needed for
3235 * RoCE. Else, just program the number we'll use initially.
3236 */
3237 if (be_roce_supported(adapter))
3238 num_vec = min_t(int, 2 * be_max_eqs(adapter),
3239 2 * num_online_cpus());
3240 else
3241 num_vec = adapter->cfg_num_qs;
3abcdeda 3242
ac6a0c4a 3243 for (i = 0; i < num_vec; i++)
3244 adapter->msix_entries[i].entry = i;
3245
3246 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3247 MIN_MSIX_VECTORS, num_vec);
3248 if (num_vec < 0)
3249 goto fail;
92bf14ab 3250
3251 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3252 adapter->num_msix_roce_vec = num_vec / 2;
3253 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3254 adapter->num_msix_roce_vec);
3255 }
3256
3257 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3258
3259 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3260 adapter->num_msix_vec);
c2bba3df 3261 return 0;
3262
3263fail:
3264 dev_warn(dev, "MSIx enable failed\n");
3265
3266 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
18c57c74 3267 if (be_virtfn(adapter))
3268 return num_vec;
3269 return 0;
3270}
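/* For example, with RoCE supported on an 8-CPU host this requests up
 * to min(2 * be_max_eqs(adapter), 16) vectors and, on success, hands
 * half of them to RoCE, keeping the remainder for NIC event queues.
 */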
3271
fe6d2a38 3272static inline int be_msix_vec_get(struct be_adapter *adapter,
748b539a 3273 struct be_eq_obj *eqo)
b628bde2 3274{
f2f781a7 3275 return adapter->msix_entries[eqo->msix_idx].vector;
b628bde2 3276}
6b7c5b94 3277
3278static int be_msix_register(struct be_adapter *adapter)
3279{
3280 struct net_device *netdev = adapter->netdev;
3281 struct be_eq_obj *eqo;
3282 int status, i, vec;
6b7c5b94 3283
3284 for_all_evt_queues(adapter, eqo, i) {
3285 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3286 vec = be_msix_vec_get(adapter, eqo);
3287 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3288 if (status)
3289 goto err_msix;
3290
3291 irq_set_affinity_hint(vec, eqo->affinity_mask);
3abcdeda 3292 }
b628bde2 3293
6b7c5b94 3294 return 0;
3abcdeda 3295err_msix:
10ef9ab4
SP
3296 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3297 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3298 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
748b539a 3299 status);
ac6a0c4a 3300 be_msix_disable(adapter);
6b7c5b94
SP
3301 return status;
3302}
3303
3304static int be_irq_register(struct be_adapter *adapter)
3305{
3306 struct net_device *netdev = adapter->netdev;
3307 int status;
3308
ac6a0c4a 3309 if (msix_enabled(adapter)) {
6b7c5b94
SP
3310 status = be_msix_register(adapter);
3311 if (status == 0)
3312 goto done;
ba343c77 3313 /* INTx is not supported for VF */
18c57c74 3314 if (be_virtfn(adapter))
ba343c77 3315 return status;
6b7c5b94
SP
3316 }
3317
e49cc34f 3318 /* INTx: only the first EQ is used */
6b7c5b94
SP
3319 netdev->irq = adapter->pdev->irq;
3320 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 3321 &adapter->eq_obj[0]);
6b7c5b94
SP
3322 if (status) {
3323 dev_err(&adapter->pdev->dev,
3324 "INTx request IRQ failed - err %d\n", status);
3325 return status;
3326 }
3327done:
3328 adapter->isr_registered = true;
3329 return 0;
3330}
3331
3332static void be_irq_unregister(struct be_adapter *adapter)
3333{
3334 struct net_device *netdev = adapter->netdev;
10ef9ab4 3335 struct be_eq_obj *eqo;
d658d98a 3336 int i, vec;
6b7c5b94
SP
3337
3338 if (!adapter->isr_registered)
3339 return;
3340
3341 /* INTx */
ac6a0c4a 3342 if (!msix_enabled(adapter)) {
e49cc34f 3343 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
3344 goto done;
3345 }
3346
3347 /* MSIx */
d658d98a
PR
3348 for_all_evt_queues(adapter, eqo, i) {
3349 vec = be_msix_vec_get(adapter, eqo);
3350 irq_set_affinity_hint(vec, NULL);
3351 free_irq(vec, eqo);
3352 }
3abcdeda 3353
6b7c5b94
SP
3354done:
3355 adapter->isr_registered = false;
6b7c5b94
SP
3356}
3357
10ef9ab4 3358static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
3359{
3360 struct be_queue_info *q;
3361 struct be_rx_obj *rxo;
3362 int i;
3363
3364 for_all_rx_queues(adapter, rxo, i) {
3365 q = &rxo->q;
3366 if (q->created) {
99b44304
KA
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
482c9e79 3380 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 3381 be_rx_cq_clean(rxo);
99b44304 3382 be_rxq_clean(rxo);
482c9e79 3383 }
10ef9ab4 3384 be_queue_free(adapter, q);
482c9e79
SP
3385 }
3386}
3387
bcc84140
KA
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
3405 * is loaded in the host and an *old* driver is used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
889cd4b2
SP
3415static int be_close(struct net_device *netdev)
3416{
3417 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
3418 struct be_eq_obj *eqo;
3419 int i;
889cd4b2 3420
e1ad8e33
KA
3421 /* This protection is needed as be_close() may be called even when the
3422 * adapter is in cleared state (after eeh perm failure)
3423 */
3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3425 return 0;
3426
bcc84140
KA
3427 be_disable_if_filters(adapter);
3428
045508a8
PP
3429 be_roce_dev_close(adapter);
3430
dff345c5
IV
3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3432 for_all_evt_queues(adapter, eqo, i) {
04d3d624 3433 napi_disable(&eqo->napi);
6384a4d0
SP
3434 be_disable_busy_poll(eqo);
3435 }
71237b6f 3436 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
04d3d624 3437 }
a323d9bf
SP
3438
3439 be_async_mcc_disable(adapter);
3440
3441 /* Wait for all pending tx completions to arrive so that
3442 * all tx skbs are freed.
3443 */
fba87559 3444 netif_tx_disable(netdev);
6e1f9975 3445 be_tx_compl_clean(adapter);
a323d9bf
SP
3446
3447 be_rx_qs_destroy(adapter);
d11a347d 3448
a323d9bf 3449 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
3450 if (msix_enabled(adapter))
3451 synchronize_irq(be_msix_vec_get(adapter, eqo));
3452 else
3453 synchronize_irq(netdev->irq);
3454 be_eq_clean(eqo);
63fcb27f
PR
3455 }
3456
889cd4b2
SP
3457 be_irq_unregister(adapter);
3458
482c9e79
SP
3459 return 0;
3460}
3461
10ef9ab4 3462static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79 3463{
1dcf7b1c
ED
3464 struct rss_info *rss = &adapter->rss_info;
3465 u8 rss_key[RSS_HASH_KEY_LEN];
482c9e79 3466 struct be_rx_obj *rxo;
e9008ee9 3467 int rc, i, j;
482c9e79
SP
3468
3469 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
3470 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3471 sizeof(struct be_eth_rx_d));
3472 if (rc)
3473 return rc;
3474 }
3475
71bb8bd0
VV
3476 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3477 rxo = default_rxo(adapter);
3478 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3479 rx_frag_size, adapter->if_handle,
3480 false, &rxo->rss_id);
3481 if (rc)
3482 return rc;
3483 }
10ef9ab4
SP
3484
3485 for_all_rss_queues(adapter, rxo, i) {
482c9e79 3486 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
3487 rx_frag_size, adapter->if_handle,
3488 true, &rxo->rss_id);
482c9e79
SP
3489 if (rc)
3490 return rc;
3491 }
3492
3493 if (be_multi_rxq(adapter)) {
71bb8bd0 3494 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
e9008ee9 3495 for_all_rss_queues(adapter, rxo, i) {
e2557877 3496 if ((j + i) >= RSS_INDIR_TABLE_LEN)
e9008ee9 3497 break;
e2557877
VD
3498 rss->rsstable[j + i] = rxo->rss_id;
3499 rss->rss_queue[j + i] = i;
e9008ee9
PR
3500 }
3501 }
e2557877
VD
3502 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3503 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
594ad54a
SR
3504
3505 if (!BEx_chip(adapter))
e2557877
VD
3506 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3507 RSS_ENABLE_UDP_IPV6;
da1388d6
VV
3508 } else {
3509 /* Disable RSS, if only default RX Q is created */
e2557877 3510 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3511 }
594ad54a 3512
1dcf7b1c 3513 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
748b539a 3514 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
1dcf7b1c 3515 128, rss_key);
da1388d6 3516 if (rc) {
e2557877 3517 rss->rss_flags = RSS_ENABLE_NONE;
da1388d6 3518 return rc;
482c9e79
SP
3519 }
3520
1dcf7b1c 3521 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
e2557877 3522
b02e60c8
SR
3523 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3524 * which is a queue empty condition
3525 */
10ef9ab4 3526 for_all_rx_queues(adapter, rxo, i)
b02e60c8
SR
3527 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3528
889cd4b2
SP
3529 return 0;
3530}
3531
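/* Illustrative fill, with hypothetical sizes: for num_rss_qs = 4 and an
 * indirection table of RSS_INDIR_TABLE_LEN = 128 entries, the nested
 * loops in be_rx_qs_create() produce the usual round-robin mapping:
 *
 *   rsstable[] = { rss_id(q0), rss_id(q1), rss_id(q2), rss_id(q3),
 *                  rss_id(q0), rss_id(q1), ... }    (128 entries)
 *
 * so consecutive hash buckets land on consecutive RSS rings.
 */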
bcc84140
KA
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
6b7c5b94
SP
3557static int be_open(struct net_device *netdev)
3558{
3559 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3560 struct be_eq_obj *eqo;
3abcdeda 3561 struct be_rx_obj *rxo;
10ef9ab4 3562 struct be_tx_obj *txo;
b236916a 3563 u8 link_status;
3abcdeda 3564 int status, i;
5fb379ee 3565
10ef9ab4 3566 status = be_rx_qs_create(adapter);
482c9e79
SP
3567 if (status)
3568 goto err;
3569
bcc84140
KA
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
c2bba3df
SK
3574 status = be_irq_register(adapter);
3575 if (status)
3576 goto err;
5fb379ee 3577
10ef9ab4 3578 for_all_rx_queues(adapter, rxo, i)
3abcdeda 3579 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 3580
10ef9ab4
SP
3581 for_all_tx_queues(adapter, txo, i)
3582 be_cq_notify(adapter, txo->cq.id, true, 0);
3583
7a1e9b20
SP
3584 be_async_mcc_enable(adapter);
3585
10ef9ab4
SP
3586 for_all_evt_queues(adapter, eqo, i) {
3587 napi_enable(&eqo->napi);
6384a4d0 3588 be_enable_busy_poll(eqo);
20947770 3589 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
10ef9ab4 3590 }
04d3d624 3591 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 3592
323ff71e 3593 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
3594 if (!status)
3595 be_link_status_update(adapter, link_status);
3596
fba87559 3597 netif_tx_start_all_queues(netdev);
045508a8 3598 be_roce_dev_open(adapter);
c9c47142 3599
c5abe7c0 3600#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3601 if (skyhawk_chip(adapter))
3602 vxlan_get_rx_port(netdev);
c5abe7c0
SP
3603#endif
3604
889cd4b2
SP
3605 return 0;
3606err:
3607 be_close(adapter->netdev);
3608 return -EIO;
5fb379ee
SP
3609}
3610
71d8d1b5
AK
3611static int be_setup_wol(struct be_adapter *adapter, bool enable)
3612{
3613 struct be_dma_mem cmd;
3614 int status = 0;
3615 u8 mac[ETH_ALEN];
3616
c7bf7169 3617 eth_zero_addr(mac);
71d8d1b5
AK
3618
3619 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
ede23fa8
JP
3620 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3621 GFP_KERNEL);
ddf1169f 3622 if (!cmd.va)
6b568689 3623 return -ENOMEM;
71d8d1b5
AK
3624
3625 if (enable) {
3626 status = pci_write_config_dword(adapter->pdev,
748b539a
SP
3627 PCICFG_PM_CONTROL_OFFSET,
3628 PCICFG_PM_CONTROL_MASK);
71d8d1b5
AK
3629 if (status) {
3630 dev_err(&adapter->pdev->dev,
2381a55c 3631 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
3632 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3633 cmd.dma);
71d8d1b5
AK
3634 return status;
3635 }
3636 status = be_cmd_enable_magic_wol(adapter,
748b539a
SP
3637 adapter->netdev->dev_addr,
3638 &cmd);
71d8d1b5
AK
3639 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3640 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3641 } else {
3642 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3643 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3644 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3645 }
3646
2b7bcebf 3647 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
3648 return status;
3649}
3650
f7062ee5
SP
3651static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3652{
3653 u32 addr;
3654
3655 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3656
3657 mac[5] = (u8)(addr & 0xFF);
3658 mac[4] = (u8)((addr >> 8) & 0xFF);
3659 mac[3] = (u8)((addr >> 16) & 0xFF);
3660 /* Use the OUI from the current MAC address */
3661 memcpy(mac, adapter->netdev->dev_addr, 3);
3662}
3663
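/* Worked example, with hypothetical values: be_vf_eth_addr_generate()
 * keeps the PF's OUI and fills the NIC-specific bytes from the jhash of
 * the PF MAC (low byte into mac[5]):
 *
 *   PF MAC : 00:00:c9:aa:bb:cc    (OUI 00:00:c9)
 *   jhash  : 0x00123456
 *   seed   : 00:00:c9:12:34:56
 *
 * be_vf_eth_addr_config() below then hands out seed, seed+1, seed+2, ...
 * by bumping mac[5] once per VF.
 */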
6d87f5c3
AK
3664/*
3665 * Generate a seed MAC address from the PF MAC Address using jhash.
3666 * MAC addresses for VFs are assigned incrementally starting from the seed.
3667 * These addresses are programmed in the ASIC by the PF and the VF driver
3668 * queries for the MAC address during its probe.
3669 */
4c876616 3670static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 3671{
f9449ab7 3672 u32 vf;
3abcdeda 3673 int status = 0;
6d87f5c3 3674 u8 mac[ETH_ALEN];
11ac75ed 3675 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3676
3677 be_vf_eth_addr_generate(adapter, mac);
3678
11ac75ed 3679 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3680 if (BEx_chip(adapter))
590c391d 3681 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
3682 vf_cfg->if_handle,
3683 &vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3684 else
3685 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3686 vf + 1);
590c391d 3687
6d87f5c3
AK
3688 if (status)
3689 dev_err(&adapter->pdev->dev,
748b539a
SP
3690 "Mac address assignment failed for VF %d\n",
3691 vf);
6d87f5c3 3692 else
11ac75ed 3693 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
3694
3695 mac[5] += 1;
3696 }
3697 return status;
3698}
3699
4c876616
SP
3700static int be_vfs_mac_query(struct be_adapter *adapter)
3701{
3702 int status, vf;
3703 u8 mac[ETH_ALEN];
3704 struct be_vf_cfg *vf_cfg;
4c876616
SP
3705
3706 for_all_vfs(adapter, vf_cfg, vf) {
b188f090
SR
3707 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3708 mac, vf_cfg->if_handle,
3709 false, vf+1);
4c876616
SP
3710 if (status)
3711 return status;
3712 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3713 }
3714 return 0;
3715}
3716
f9449ab7 3717static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 3718{
11ac75ed 3719 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
3720 u32 vf;
3721
257a3feb 3722 if (pci_vfs_assigned(adapter->pdev)) {
4c876616
SP
3723 dev_warn(&adapter->pdev->dev,
3724 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
3725 goto done;
3726 }
3727
b4c1df93
SP
3728 pci_disable_sriov(adapter->pdev);
3729
11ac75ed 3730 for_all_vfs(adapter, vf_cfg, vf) {
3175d8c2 3731 if (BEx_chip(adapter))
11ac75ed
SP
3732 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3733 vf_cfg->pmac_id, vf + 1);
3175d8c2
SP
3734 else
3735 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3736 vf + 1);
f9449ab7 3737
11ac75ed
SP
3738 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3739 }
39f1d94d
SP
3740done:
3741 kfree(adapter->vf_cfg);
3742 adapter->num_vfs = 0;
f174c7ec 3743 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
6d87f5c3
AK
3744}
3745
7707133c
SP
3746static void be_clear_queues(struct be_adapter *adapter)
3747{
3748 be_mcc_queues_destroy(adapter);
3749 be_rx_cqs_destroy(adapter);
3750 be_tx_queues_destroy(adapter);
3751 be_evt_queues_destroy(adapter);
3752}
3753
68d7bdcb 3754static void be_cancel_worker(struct be_adapter *adapter)
a54769f5 3755{
191eb756
SP
3756 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3757 cancel_delayed_work_sync(&adapter->work);
3758 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3759 }
68d7bdcb
SP
3760}
3761
eb7dd46c
SP
3762static void be_cancel_err_detection(struct be_adapter *adapter)
3763{
3764 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3766 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3767 }
3768}
3769
c5abe7c0 3770#ifdef CONFIG_BE2NET_VXLAN
c9c47142
SP
3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3772{
630f4b70
SB
3773 struct net_device *netdev = adapter->netdev;
3774
c9c47142
SP
3775 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3776 be_cmd_manage_iface(adapter, adapter->if_handle,
3777 OP_CONVERT_TUNNEL_TO_NORMAL);
3778
3779 if (adapter->vxlan_port)
3780 be_cmd_set_vxlan_port(adapter, 0);
3781
3782 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3783 adapter->vxlan_port = 0;
630f4b70
SB
3784
3785 netdev->hw_enc_features = 0;
3786 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
ac9a3d84 3787 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
c9c47142 3788}
c5abe7c0 3789#endif
c9c47142 3790
f2858738
VV
3791static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3792{
3793 struct be_resources res = adapter->pool_res;
3794 u16 num_vf_qs = 1;
3795
3796 /* Distribute the queue resources equally among the PF and its VFs.
3797 * Do not distribute queue resources in multi-channel configuration.
3798 */
3799 if (num_vfs && !be_is_mc(adapter)) {
3800 /* If the number of VFs requested is at least 8 less than the
3801 * max supported, reserve 8 queue pairs for the PF and divide
3802 * the remaining resources evenly among the VFs.
3803 */
3804 if (num_vfs < (be_max_vfs(adapter) - 8))
3805 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3806 else
3807 num_vf_qs = res.max_rss_qs / num_vfs;
3808
3809 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3810 * interfaces per port. Provide RSS on VFs only if the number
3811 * of VFs requested is less than the MAX_RSS_IFACES limit.
3812 */
3813 if (num_vfs >= MAX_RSS_IFACES)
3814 num_vf_qs = 1;
3815 }
3816 return num_vf_qs;
3817}
3818
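/* Worked example, with hypothetical numbers: for res.max_rss_qs = 32 and
 * be_max_vfs() = 32 on a non-multi-channel adapter,
 *
 *   num_vfs = 4   ->  (32 - 8) / 4 = 6 queue pairs per VF
 *   num_vfs = 30  ->   32 / 30    = 1 queue pair per VF
 *   num_vfs >= MAX_RSS_IFACES     -> forced to 1 (no RSS on VFs)
 */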
b05004ad
SK
3819static int be_clear(struct be_adapter *adapter)
3820{
f2858738
VV
3821 struct pci_dev *pdev = adapter->pdev;
3822 u16 num_vf_qs;
3823
68d7bdcb 3824 be_cancel_worker(adapter);
191eb756 3825
11ac75ed 3826 if (sriov_enabled(adapter))
f9449ab7
SP
3827 be_vf_clear(adapter);
3828
bec84e6b
VV
3829 /* Re-configure FW to distribute resources evenly across max-supported
3830 * number of VFs, only when VFs are not already enabled.
3831 */
ace40aff
VV
3832 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3833 !pci_vfs_assigned(pdev)) {
f2858738
VV
3834 num_vf_qs = be_calculate_vf_qs(adapter,
3835 pci_sriov_get_totalvfs(pdev));
bec84e6b 3836 be_cmd_set_sriov_config(adapter, adapter->pool_res,
f2858738
VV
3837 pci_sriov_get_totalvfs(pdev),
3838 num_vf_qs);
3839 }
bec84e6b 3840
c5abe7c0 3841#ifdef CONFIG_BE2NET_VXLAN
c9c47142 3842 be_disable_vxlan_offloads(adapter);
c5abe7c0 3843#endif
bcc84140
KA
3844 kfree(adapter->pmac_id);
3845 adapter->pmac_id = NULL;
fbc13f01 3846
f9449ab7 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5 3848
7707133c 3849 be_clear_queues(adapter);
a54769f5 3850
10ef9ab4 3851 be_msix_disable(adapter);
e1ad8e33 3852 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
a54769f5
SP
3853 return 0;
3854}
3855
4c876616 3856static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 3857{
92bf14ab 3858 struct be_resources res = {0};
bcc84140 3859 u32 cap_flags, en_flags, vf;
4c876616 3860 struct be_vf_cfg *vf_cfg;
0700d816 3861 int status;
abb93951 3862
0700d816 3863 /* If a FW profile exists, then cap_flags are updated */
4c876616 3864 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
0ed7d749 3865 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
abb93951 3866
4c876616 3867 for_all_vfs(adapter, vf_cfg, vf) {
92bf14ab
SP
3868 if (!BE3_chip(adapter)) {
3869 status = be_cmd_get_profile_config(adapter, &res,
f2858738 3870 RESOURCE_LIMITS,
92bf14ab 3871 vf + 1);
435452aa 3872 if (!status) {
92bf14ab 3873 cap_flags = res.if_cap_flags;
435452aa
VV
3874 /* Prevent VFs from enabling VLAN promiscuous
3875 * mode
3876 */
3877 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3878 }
92bf14ab 3879 }
4c876616 3880
bcc84140
KA
3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
4c876616 3887 if (status)
0700d816 3888 return status;
4c876616 3889 }
0700d816
KA
3890
3891 return 0;
abb93951
PR
3892}
3893
39f1d94d 3894static int be_vf_setup_init(struct be_adapter *adapter)
30128031 3895{
11ac75ed 3896 struct be_vf_cfg *vf_cfg;
30128031
SP
3897 int vf;
3898
39f1d94d
SP
3899 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3900 GFP_KERNEL);
3901 if (!adapter->vf_cfg)
3902 return -ENOMEM;
3903
11ac75ed
SP
3904 for_all_vfs(adapter, vf_cfg, vf) {
3905 vf_cfg->if_handle = -1;
3906 vf_cfg->pmac_id = -1;
30128031 3907 }
39f1d94d 3908 return 0;
30128031
SP
3909}
3910
f9449ab7
SP
3911static int be_vf_setup(struct be_adapter *adapter)
3912{
c502224e 3913 struct device *dev = &adapter->pdev->dev;
11ac75ed 3914 struct be_vf_cfg *vf_cfg;
4c876616 3915 int status, old_vfs, vf;
e7bcbd7b 3916 bool spoofchk;
39f1d94d 3917
257a3feb 3918 old_vfs = pci_num_vf(adapter->pdev);
39f1d94d
SP
3919
3920 status = be_vf_setup_init(adapter);
3921 if (status)
3922 goto err;
30128031 3923
4c876616
SP
3924 if (old_vfs) {
3925 for_all_vfs(adapter, vf_cfg, vf) {
3926 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3927 if (status)
3928 goto err;
3929 }
f9449ab7 3930
4c876616
SP
3931 status = be_vfs_mac_query(adapter);
3932 if (status)
3933 goto err;
3934 } else {
bec84e6b
VV
3935 status = be_vfs_if_create(adapter);
3936 if (status)
3937 goto err;
3938
39f1d94d
SP
3939 status = be_vf_eth_addr_config(adapter);
3940 if (status)
3941 goto err;
3942 }
f9449ab7 3943
11ac75ed 3944 for_all_vfs(adapter, vf_cfg, vf) {
04a06028 3945 /* Allow VFs to program MAC/VLAN filters */
435452aa
VV
3946 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3947 vf + 1);
3948 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
04a06028 3949 status = be_cmd_set_fn_privileges(adapter,
435452aa 3950 vf_cfg->privileges |
04a06028
SP
3951 BE_PRIV_FILTMGMT,
3952 vf + 1);
435452aa
VV
3953 if (!status) {
3954 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
04a06028
SP
3955 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3956 vf);
435452aa 3957 }
04a06028
SP
3958 }
3959
0f77ba73
RN
3960 /* Allow full available bandwidth */
3961 if (!old_vfs)
3962 be_cmd_config_qos(adapter, 0, 0, vf + 1);
f1f3ee1b 3963
e7bcbd7b
KA
3964 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3965 vf_cfg->if_handle, NULL,
3966 &spoofchk);
3967 if (!status)
3968 vf_cfg->spoofchk = spoofchk;
3969
bdce2ad7 3970 if (!old_vfs) {
0599863d 3971 be_cmd_enable_vf(adapter, vf + 1);
bdce2ad7
SR
3972 be_cmd_set_logical_link_config(adapter,
3973 IFLA_VF_LINK_STATE_AUTO,
3974 vf+1);
3975 }
f9449ab7 3976 }
b4c1df93
SP
3977
3978 if (!old_vfs) {
3979 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3980 if (status) {
3981 dev_err(dev, "SRIOV enable failed\n");
3982 adapter->num_vfs = 0;
3983 goto err;
3984 }
3985 }
f174c7ec
VV
3986
3987 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
f9449ab7
SP
3988 return 0;
3989err:
4c876616
SP
3990 dev_err(dev, "VF setup failed\n");
3991 be_vf_clear(adapter);
f9449ab7
SP
3992 return status;
3993}
3994
f93f160b
VV
3995/* Converting function_mode bits on BE3 to SH mc_type enums */
3996
3997static u8 be_convert_mc_type(u32 function_mode)
3998{
66064dbc 3999 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
f93f160b 4000 return vNIC1;
66064dbc 4001 else if (function_mode & QNQ_MODE)
f93f160b
VV
4002 return FLEX10;
4003 else if (function_mode & VNIC_MODE)
4004 return vNIC2;
4005 else if (function_mode & UMC_ENABLED)
4006 return UMC;
4007 else
4008 return MC_NONE;
4009}
4010
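/* Summary of the mapping above (bits tested in this order):
 *
 *   VNIC_MODE && QNQ_MODE  -> vNIC1
 *   QNQ_MODE only          -> FLEX10
 *   VNIC_MODE only         -> vNIC2
 *   UMC_ENABLED            -> UMC
 *   none set               -> MC_NONE
 */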
92bf14ab
SP
4011/* On BE2/BE3 FW does not suggest the supported limits */
4012static void BEx_get_resources(struct be_adapter *adapter,
4013 struct be_resources *res)
4014{
bec84e6b 4015 bool use_sriov = adapter->num_vfs ? 1 : 0;
92bf14ab
SP
4016
4017 if (be_physfn(adapter))
4018 res->max_uc_mac = BE_UC_PMAC_COUNT;
4019 else
4020 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4021
f93f160b
VV
4022 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4023
4024 if (be_is_mc(adapter)) {
4025 /* Assuming that there are 4 channels per port
4026 * when multi-channel is enabled
4027 */
4028 if (be_is_qnq_mode(adapter))
4029 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4030 else
4031 /* In a non-qnq multichannel mode, the pvid
4032 * takes up one vlan entry
4033 */
4034 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4035 } else {
92bf14ab 4036 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
f93f160b
VV
4037 }
4038
92bf14ab
SP
4039 res->max_mcast_mac = BE_MAX_MC;
4040
a5243dab
VV
4041 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4042 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4043 * *only* if it is RSS-capable.
4044 */
4045 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
18c57c74
KA
4046 be_virtfn(adapter) ||
4047 (be_is_mc(adapter) &&
4048 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
92bf14ab 4049 res->max_tx_qs = 1;
a28277dc
SR
4050 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4051 struct be_resources super_nic_res = {0};
4052
4053 /* On a SuperNIC profile, the driver needs to use the
4054 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4055 */
f2858738
VV
4056 be_cmd_get_profile_config(adapter, &super_nic_res,
4057 RESOURCE_LIMITS, 0);
a28277dc
SR
4058 /* Some old versions of BE3 FW don't report max_tx_qs value */
4059 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4060 } else {
92bf14ab 4061 res->max_tx_qs = BE3_MAX_TX_QS;
a28277dc 4062 }
92bf14ab
SP
4063
4064 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4065 !use_sriov && be_physfn(adapter))
4066 res->max_rss_qs = (adapter->be3_native) ?
4067 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4068 res->max_rx_qs = res->max_rss_qs + 1;
4069
e3dc867c 4070 if (be_physfn(adapter))
d3518e21 4071 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
e3dc867c
SR
4072 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4073 else
4074 res->max_evt_qs = 1;
92bf14ab
SP
4075
4076 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
71bb8bd0 4077 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
92bf14ab
SP
4078 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4079 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4080}
4081
30128031
SP
4082static void be_setup_init(struct be_adapter *adapter)
4083{
4084 adapter->vlan_prio_bmap = 0xff;
42f11cf2 4085 adapter->phy.link_speed = -1;
30128031
SP
4086 adapter->if_handle = -1;
4087 adapter->be3_native = false;
f66b7cfd 4088 adapter->if_flags = 0;
f25b119c
PR
4089 if (be_physfn(adapter))
4090 adapter->cmd_privileges = MAX_PRIVILEGES;
4091 else
4092 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
4093}
4094
bec84e6b
VV
4095static int be_get_sriov_config(struct be_adapter *adapter)
4096{
bec84e6b 4097 struct be_resources res = {0};
d3d18312 4098 int max_vfs, old_vfs;
bec84e6b 4099
f2858738 4100 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
d3d18312 4101
ace40aff 4102 /* Some old versions of BE3 FW don't report max_vfs value */
bec84e6b
VV
4103 if (BE3_chip(adapter) && !res.max_vfs) {
4104 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4105 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4106 }
4107
d3d18312 4108 adapter->pool_res = res;
bec84e6b 4109
ace40aff
VV
4110 /* If during previous unload of the driver, the VFs were not disabled,
4111 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4112 * Instead use the TotalVFs value stored in the pci-dev struct.
4113 */
bec84e6b
VV
4114 old_vfs = pci_num_vf(adapter->pdev);
4115 if (old_vfs) {
ace40aff
VV
4116 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4117 old_vfs);
4118
4119 adapter->pool_res.max_vfs =
4120 pci_sriov_get_totalvfs(adapter->pdev);
bec84e6b 4121 adapter->num_vfs = old_vfs;
bec84e6b
VV
4122 }
4123
4124 return 0;
4125}
4126
ace40aff
VV
4127static void be_alloc_sriov_res(struct be_adapter *adapter)
4128{
4129 int old_vfs = pci_num_vf(adapter->pdev);
4130 u16 num_vf_qs;
4131 int status;
4132
4133 be_get_sriov_config(adapter);
4134
4135 if (!old_vfs)
4136 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4137
4138 /* When the HW is in SRIOV capable configuration, the PF-pool
4139 * resources are given to PF during driver load, if there are no
4140 * old VFs. This facility is not available in BE3 FW.
4141 * Also, this is done by FW in Lancer chip.
4142 */
4143 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4144 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4145 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4146 num_vf_qs);
4147 if (status)
4148 dev_err(&adapter->pdev->dev,
4149 "Failed to optimize SRIOV resources\n");
4150 }
4151}
4152
92bf14ab 4153static int be_get_resources(struct be_adapter *adapter)
abb93951 4154{
92bf14ab
SP
4155 struct device *dev = &adapter->pdev->dev;
4156 struct be_resources res = {0};
4157 int status;
abb93951 4158
92bf14ab
SP
4159 if (BEx_chip(adapter)) {
4160 BEx_get_resources(adapter, &res);
4161 adapter->res = res;
abb93951
PR
4162 }
4163
92bf14ab
SP
4164 /* For Lancer, SH etc read per-function resource limits from FW.
4165 * GET_FUNC_CONFIG returns per function guaranteed limits.
4166 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4167 */
4168 if (!BEx_chip(adapter)) {
4169 status = be_cmd_get_func_config(adapter, &res);
4170 if (status)
4171 return status;
abb93951 4172
71bb8bd0
VV
4173 /* If a default RXQ must be created, we'll use up one RSS queue */
4174 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4175 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4176 res.max_rss_qs -= 1;
4177
92bf14ab
SP
4178 /* If RoCE may be enabled, stash away half the EQs for RoCE */
4179 if (be_roce_supported(adapter))
4180 res.max_evt_qs /= 2;
4181 adapter->res = res;
abb93951 4182 }
4c876616 4183
71bb8bd0
VV
4184 /* If FW supports RSS default queue, then skip creating non-RSS
4185 * queue for non-IP traffic.
4186 */
4187 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4188 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4189
acbafeb1
SP
4190 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4191 be_max_txqs(adapter), be_max_rxqs(adapter),
4192 be_max_rss(adapter), be_max_eqs(adapter),
4193 be_max_vfs(adapter));
4194 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4195 be_max_uc(adapter), be_max_mc(adapter),
4196 be_max_vlans(adapter));
4197
ace40aff
VV
4198 /* Sanitize cfg_num_qs based on HW and platform limits */
4199 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4200 be_max_qs(adapter));
92bf14ab 4201 return 0;
abb93951
PR
4202}
4203
39f1d94d
SP
4204static int be_get_config(struct be_adapter *adapter)
4205{
6b085ba9 4206 int status, level;
542963b7 4207 u16 profile_id;
6b085ba9
SP
4208
4209 status = be_cmd_get_cntl_attributes(adapter);
4210 if (status)
4211 return status;
39f1d94d 4212
e97e3cda 4213 status = be_cmd_query_fw_cfg(adapter);
abb93951 4214 if (status)
92bf14ab 4215 return status;
abb93951 4216
6b085ba9
SP
4217 if (BEx_chip(adapter)) {
4218 level = be_cmd_get_fw_log_level(adapter);
4219 adapter->msg_enable =
4220 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4221 }
4222
4223 be_cmd_get_acpi_wol_cap(adapter);
4224
21252377
VV
4225 be_cmd_query_port_name(adapter);
4226
4227 if (be_physfn(adapter)) {
542963b7
VV
4228 status = be_cmd_get_active_profile(adapter, &profile_id);
4229 if (!status)
4230 dev_info(&adapter->pdev->dev,
4231 "Using profile 0x%x\n", profile_id);
962bcb75 4232 }
bec84e6b 4233
92bf14ab
SP
4234 status = be_get_resources(adapter);
4235 if (status)
4236 return status;
abb93951 4237
46ee9c14
RN
4238 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4239 sizeof(*adapter->pmac_id), GFP_KERNEL);
92bf14ab
SP
4240 if (!adapter->pmac_id)
4241 return -ENOMEM;
abb93951 4242
92bf14ab 4243 return 0;
39f1d94d
SP
4244}
4245
95046b92
SP
4246static int be_mac_setup(struct be_adapter *adapter)
4247{
4248 u8 mac[ETH_ALEN];
4249 int status;
4250
4251 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4252 status = be_cmd_get_perm_mac(adapter, mac);
4253 if (status)
4254 return status;
4255
4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
95046b92
SP
4258 }
4259
95046b92
SP
4260 return 0;
4261}
4262
68d7bdcb
SP
4263static void be_schedule_worker(struct be_adapter *adapter)
4264{
4265 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4266 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4267}
4268
eb7dd46c
SP
4269static void be_schedule_err_detection(struct be_adapter *adapter)
4270{
4271 schedule_delayed_work(&adapter->be_err_detection_work,
4272 msecs_to_jiffies(1000));
4273 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4274}
4275
7707133c 4276static int be_setup_queues(struct be_adapter *adapter)
5fb379ee 4277{
68d7bdcb 4278 struct net_device *netdev = adapter->netdev;
10ef9ab4 4279 int status;
ba343c77 4280
7707133c 4281 status = be_evt_queues_create(adapter);
abb93951
PR
4282 if (status)
4283 goto err;
73d540f2 4284
7707133c 4285 status = be_tx_qs_create(adapter);
c2bba3df
SK
4286 if (status)
4287 goto err;
10ef9ab4 4288
7707133c 4289 status = be_rx_cqs_create(adapter);
10ef9ab4 4290 if (status)
a54769f5 4291 goto err;
6b7c5b94 4292
7707133c 4293 status = be_mcc_queues_create(adapter);
10ef9ab4
SP
4294 if (status)
4295 goto err;
4296
68d7bdcb
SP
4297 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4298 if (status)
4299 goto err;
4300
4301 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4302 if (status)
4303 goto err;
4304
7707133c
SP
4305 return 0;
4306err:
4307 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4308 return status;
4309}
4310
68d7bdcb
SP
4311int be_update_queues(struct be_adapter *adapter)
4312{
4313 struct net_device *netdev = adapter->netdev;
4314 int status;
4315
4316 if (netif_running(netdev))
4317 be_close(netdev);
4318
4319 be_cancel_worker(adapter);
4320
4321 /* If any vectors have been shared with RoCE, we cannot re-program
4322 * the MSIx table.
4323 */
4324 if (!adapter->num_msix_roce_vec)
4325 be_msix_disable(adapter);
4326
4327 be_clear_queues(adapter);
4328
4329 if (!msix_enabled(adapter)) {
4330 status = be_msix_enable(adapter);
4331 if (status)
4332 return status;
4333 }
4334
4335 status = be_setup_queues(adapter);
4336 if (status)
4337 return status;
4338
4339 be_schedule_worker(adapter);
4340
4341 if (netif_running(netdev))
4342 status = be_open(netdev);
4343
4344 return status;
4345}
4346
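/* Sequencing used by be_update_queues(), for reference:
 *
 *   be_close() -> cancel worker -> [disable MSI-X unless vectors are
 *   shared with RoCE] -> destroy queues -> [re-enable MSI-X] ->
 *   recreate queues -> reschedule worker -> be_open()
 *
 * MSI-X is left alone when RoCE shares vectors, as the MSI-X table
 * cannot be re-programmed while RoCE still owns entries in it.
 */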
f7062ee5
SP
4347static inline int fw_major_num(const char *fw_ver)
4348{
4349 int fw_major = 0, i;
4350
4351 i = sscanf(fw_ver, "%d.", &fw_major);
4352 if (i != 1)
4353 return 0;
4354
4355 return fw_major;
4356}
4357
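/* Example inputs (hypothetical version strings):
 *
 *   fw_major_num("10.6.228.0") == 10
 *   fw_major_num("4.9.134.0")  == 4
 *   fw_major_num("bad-ver")    == 0    (sscanf() matches nothing)
 */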
f962f840
SP
4358 /* If any VFs are already enabled, don't FLR the PF */
4359static bool be_reset_required(struct be_adapter *adapter)
4360{
4361 return pci_num_vf(adapter->pdev) ? false : true;
4362}
4363
4364/* Wait for the FW to be ready and perform the required initialization */
4365static int be_func_init(struct be_adapter *adapter)
4366{
4367 int status;
4368
4369 status = be_fw_wait_ready(adapter);
4370 if (status)
4371 return status;
4372
4373 if (be_reset_required(adapter)) {
4374 status = be_cmd_reset_function(adapter);
4375 if (status)
4376 return status;
4377
4378 /* Wait for interrupts to quiesce after an FLR */
4379 msleep(100);
4380
4381 /* We can clear all errors when function reset succeeds */
954f6825 4382 be_clear_error(adapter, BE_CLEAR_ALL);
f962f840
SP
4383 }
4384
4385 /* Tell FW we're ready to fire cmds */
4386 status = be_cmd_fw_init(adapter);
4387 if (status)
4388 return status;
4389
4390 /* Allow interrupts for other ULPs running on NIC function */
4391 be_intr_set(adapter, true);
4392
4393 return 0;
4394}
4395
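/* Init order performed above, for reference: wait for FW readiness ->
 * optional function reset (FLR, skipped when VFs are already enabled) ->
 * 100ms quiesce -> clear recorded errors -> FW init handshake -> enable
 * interrupts for other ULPs on the NIC function.
 */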
7707133c
SP
4396static int be_setup(struct be_adapter *adapter)
4397{
4398 struct device *dev = &adapter->pdev->dev;
bcc84140 4399 u32 en_flags;
7707133c
SP
4400 int status;
4401
f962f840
SP
4402 status = be_func_init(adapter);
4403 if (status)
4404 return status;
4405
7707133c
SP
4406 be_setup_init(adapter);
4407
4408 if (!lancer_chip(adapter))
4409 be_cmd_req_native_mode(adapter);
4410
ace40aff
VV
4411 if (!BE2_chip(adapter) && be_physfn(adapter))
4412 be_alloc_sriov_res(adapter);
4413
7707133c 4414 status = be_get_config(adapter);
10ef9ab4 4415 if (status)
a54769f5 4416 goto err;
6b7c5b94 4417
7707133c 4418 status = be_msix_enable(adapter);
10ef9ab4 4419 if (status)
a54769f5 4420 goto err;
6b7c5b94 4421
bcc84140
KA
4422 /* will enable all the needed filter flags in be_open() */
4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
7707133c 4427 if (status)
a54769f5 4428 goto err;
6b7c5b94 4429
68d7bdcb
SP
4430 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4431 rtnl_lock();
7707133c 4432 status = be_setup_queues(adapter);
68d7bdcb 4433 rtnl_unlock();
95046b92 4434 if (status)
1578e777
PR
4435 goto err;
4436
7707133c 4437 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
7707133c
SP
4438
4439 status = be_mac_setup(adapter);
10ef9ab4
SP
4440 if (status)
4441 goto err;
4442
e97e3cda 4443 be_cmd_get_fw_ver(adapter);
acbafeb1 4444 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
5a56eb10 4445
e9e2a904 4446 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
50762667 4447 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
e9e2a904
SK
4448 adapter->fw_ver);
4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4450 }
4451
00d594c3
KA
4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4453 adapter->rx_fc);
4454 if (status)
4455 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4456 &adapter->rx_fc);
590c391d 4457
00d594c3
KA
4458 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4459 adapter->tx_fc, adapter->rx_fc);
2dc1deb6 4460
bdce2ad7
SR
4461 if (be_physfn(adapter))
4462 be_cmd_set_logical_link_config(adapter,
4463 IFLA_VF_LINK_STATE_AUTO, 0);
4464
bec84e6b
VV
4465 if (adapter->num_vfs)
4466 be_vf_setup(adapter);
f9449ab7 4467
f25b119c
PR
4468 status = be_cmd_get_phy_info(adapter);
4469 if (!status && be_pause_supported(adapter))
42f11cf2
AK
4470 adapter->phy.fc_autoneg = 1;
4471
68d7bdcb 4472 be_schedule_worker(adapter);
e1ad8e33 4473 adapter->flags |= BE_FLAGS_SETUP_DONE;
f9449ab7 4474 return 0;
a54769f5
SP
4475err:
4476 be_clear(adapter);
4477 return status;
4478}
6b7c5b94 4479
66268739
IV
4480#ifdef CONFIG_NET_POLL_CONTROLLER
4481static void be_netpoll(struct net_device *netdev)
4482{
4483 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 4484 struct be_eq_obj *eqo;
66268739
IV
4485 int i;
4486
e49cc34f 4487 for_all_evt_queues(adapter, eqo, i) {
20947770 4488 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
e49cc34f
SP
4489 napi_schedule(&eqo->napi);
4490 }
66268739
IV
4491}
4492#endif
4493
96c9b2e4 4494static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
fa9a6fed 4495
306f1348
SP
4496static bool phy_flashing_required(struct be_adapter *adapter)
4497{
e02cfd96 4498 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
42f11cf2 4499 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
4500}
4501
c165541e
PR
4502static bool is_comp_in_ufi(struct be_adapter *adapter,
4503 struct flash_section_info *fsec, int type)
4504{
4505 int i = 0, img_type = 0;
4506 struct flash_section_info_g2 *fsec_g2 = NULL;
4507
ca34fe38 4508 if (BE2_chip(adapter))
c165541e
PR
4509 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4510
4511 for (i = 0; i < MAX_FLASH_COMP; i++) {
4512 if (fsec_g2)
4513 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4514 else
4515 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4516
4517 if (img_type == type)
4518 return true;
4519 }
4520 return false;
4521
4522}
4523
4188e7df 4524static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
748b539a
SP
4525 int header_size,
4526 const struct firmware *fw)
c165541e
PR
4527{
4528 struct flash_section_info *fsec = NULL;
4529 const u8 *p = fw->data;
4530
4531 p += header_size;
4532 while (p < (fw->data + fw->size)) {
4533 fsec = (struct flash_section_info *)p;
4534 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4535 return fsec;
4536 p += 32;
4537 }
4538 return NULL;
4539}
4540
96c9b2e4
VV
4541static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4542 u32 img_offset, u32 img_size, int hdr_size,
4543 u16 img_optype, bool *crc_match)
4544{
4545 u32 crc_offset;
4546 int status;
4547 u8 crc[4];
4548
70a7b525
VV
4549 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4550 img_size - 4);
96c9b2e4
VV
4551 if (status)
4552 return status;
4553
4554 crc_offset = hdr_size + img_offset + img_size - 4;
4555
4556 /* Skip flashing, if crc of flashed region matches */
4557 if (!memcmp(crc, p + crc_offset, 4))
4558 *crc_match = true;
4559 else
4560 *crc_match = false;
4561
4562 return status;
4563}
4564
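/* Layout note, illustrative: a flash component stores its CRC in its
 * last 4 bytes, so within the UFI file the on-disk CRC of an image of
 * img_size bytes at img_offset sits at
 *
 *   crc_offset = hdr_size + img_offset + img_size - 4
 *
 * be_check_flash_crc() fetches the CRC of the already-flashed region
 * from the FW and memcmp()s it against those 4 bytes; a match lets the
 * caller skip re-flashing that component.
 */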
773a2d7c 4565static int be_flash(struct be_adapter *adapter, const u8 *img,
70a7b525
VV
4566 struct be_dma_mem *flash_cmd, int optype, int img_size,
4567 u32 img_offset)
773a2d7c 4568{
70a7b525 4569 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
773a2d7c 4570 struct be_cmd_write_flashrom *req = flash_cmd->va;
96c9b2e4 4571 int status;
773a2d7c 4572
773a2d7c
PR
4573 while (total_bytes) {
4574 num_bytes = min_t(u32, 32*1024, total_bytes);
4575
4576 total_bytes -= num_bytes;
4577
4578 if (!total_bytes) {
4579 if (optype == OPTYPE_PHY_FW)
4580 flash_op = FLASHROM_OPER_PHY_FLASH;
4581 else
4582 flash_op = FLASHROM_OPER_FLASH;
4583 } else {
4584 if (optype == OPTYPE_PHY_FW)
4585 flash_op = FLASHROM_OPER_PHY_SAVE;
4586 else
4587 flash_op = FLASHROM_OPER_SAVE;
4588 }
4589
be716446 4590 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
4591 img += num_bytes;
4592 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
70a7b525
VV
4593 flash_op, img_offset +
4594 bytes_sent, num_bytes);
4c60005f 4595 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
96c9b2e4
VV
4596 optype == OPTYPE_PHY_FW)
4597 break;
4598 else if (status)
773a2d7c 4599 return status;
70a7b525
VV
4600
4601 bytes_sent += num_bytes;
773a2d7c
PR
4602 }
4603 return 0;
4604}
4605
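/* Chunking sketch, illustrative: be_flash() pushes the image down in
 * 32KB slices; every slice but the last is sent with a *_SAVE op and
 * only the final slice issues the actual *_FLASH op. For a 70KB image:
 *
 *   slice 1: 32KB  FLASHROM_OPER_SAVE   (bytes_sent = 0)
 *   slice 2: 32KB  FLASHROM_OPER_SAVE   (bytes_sent = 32K)
 *   slice 3:  6KB  FLASHROM_OPER_FLASH  (bytes_sent = 64K)
 */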
0ad3157e 4606/* For BE2, BE3 and BE3-R */
ca34fe38 4607static int be_flash_BEx(struct be_adapter *adapter,
748b539a
SP
4608 const struct firmware *fw,
4609 struct be_dma_mem *flash_cmd, int num_of_images)
84517482 4610{
c165541e 4611 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
96c9b2e4 4612 struct device *dev = &adapter->pdev->dev;
c165541e 4613 struct flash_section_info *fsec = NULL;
96c9b2e4
VV
4614 int status, i, filehdr_size, num_comp;
4615 const struct flash_comp *pflashcomp;
4616 bool crc_match;
4617 const u8 *p;
c165541e
PR
4618
4619 struct flash_comp gen3_flash_types[] = {
4620 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
4621 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
4622 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
4623 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
4624 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
4625 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
4626 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
4627 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
4628 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
4629 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
4630 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
4631 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
4632 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
4633 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
4634 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
4635 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
4636 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
4637 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
4638 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
4639 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 4640 };
c165541e
PR
4641
4642 struct flash_comp gen2_flash_types[] = {
4643 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
4644 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
4645 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
4646 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
4647 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
4648 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
4649 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
4650 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
4651 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
4652 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
4653 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
4654 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
4655 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
4656 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
4657 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
4658 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
4659 };
4660
ca34fe38 4661 if (BE3_chip(adapter)) {
3f0d4560
AK
4662 pflashcomp = gen3_flash_types;
4663 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 4664 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
4665 } else {
4666 pflashcomp = gen2_flash_types;
4667 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 4668 num_comp = ARRAY_SIZE(gen2_flash_types);
5d3acd0d 4669 img_hdrs_size = 0;
84517482 4670 }
ca34fe38 4671
c165541e
PR
4672 /* Get flash section info*/
4673 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4674 if (!fsec) {
96c9b2e4 4675 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
c165541e
PR
4676 return -1;
4677 }
9fe96934 4678 for (i = 0; i < num_comp; i++) {
c165541e 4679 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 4680 continue;
c165541e
PR
4681
4682 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
4683 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
4684 continue;
4685
773a2d7c
PR
4686 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
4687 !phy_flashing_required(adapter))
306f1348 4688 continue;
c165541e 4689
773a2d7c 4690 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
96c9b2e4
VV
4691 status = be_check_flash_crc(adapter, fw->data,
4692 pflashcomp[i].offset,
4693 pflashcomp[i].size,
4694 filehdr_size +
4695 img_hdrs_size,
4696 OPTYPE_REDBOOT, &crc_match);
4697 if (status) {
4698 dev_err(dev,
4699 "Could not get CRC for 0x%x region\n",
4700 pflashcomp[i].optype);
4701 continue;
4702 }
4703
4704 if (crc_match)
773a2d7c
PR
4705 continue;
4706 }
c165541e 4707
96c9b2e4
VV
4708 p = fw->data + filehdr_size + pflashcomp[i].offset +
4709 img_hdrs_size;
306f1348
SP
4710 if (p + pflashcomp[i].size > fw->data + fw->size)
4711 return -1;
773a2d7c
PR
4712
4713 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
70a7b525 4714 pflashcomp[i].size, 0);
773a2d7c 4715 if (status) {
96c9b2e4 4716 dev_err(dev, "Flashing section type 0x%x failed\n",
773a2d7c
PR
4717 pflashcomp[i].img_type);
4718 return status;
84517482 4719 }
84517482 4720 }
84517482
AK
4721 return 0;
4722}
4723
96c9b2e4
VV
4724static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4725{
4726 u32 img_type = le32_to_cpu(fsec_entry.type);
4727 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4728
4729 if (img_optype != 0xFFFF)
4730 return img_optype;
4731
4732 switch (img_type) {
4733 case IMAGE_FIRMWARE_iSCSI:
4734 img_optype = OPTYPE_ISCSI_ACTIVE;
4735 break;
4736 case IMAGE_BOOT_CODE:
4737 img_optype = OPTYPE_REDBOOT;
4738 break;
4739 case IMAGE_OPTION_ROM_ISCSI:
4740 img_optype = OPTYPE_BIOS;
4741 break;
4742 case IMAGE_OPTION_ROM_PXE:
4743 img_optype = OPTYPE_PXE_BIOS;
4744 break;
4745 case IMAGE_OPTION_ROM_FCoE:
4746 img_optype = OPTYPE_FCOE_BIOS;
4747 break;
4748 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4749 img_optype = OPTYPE_ISCSI_BACKUP;
4750 break;
4751 case IMAGE_NCSI:
4752 img_optype = OPTYPE_NCSI_FW;
4753 break;
4754 case IMAGE_FLASHISM_JUMPVECTOR:
4755 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4756 break;
4757 case IMAGE_FIRMWARE_PHY:
4758 img_optype = OPTYPE_SH_PHY_FW;
4759 break;
4760 case IMAGE_REDBOOT_DIR:
4761 img_optype = OPTYPE_REDBOOT_DIR;
4762 break;
4763 case IMAGE_REDBOOT_CONFIG:
4764 img_optype = OPTYPE_REDBOOT_CONFIG;
4765 break;
4766 case IMAGE_UFI_DIR:
4767 img_optype = OPTYPE_UFI_DIR;
4768 break;
4769 default:
4770 break;
4771 }
4772
4773 return img_optype;
4774}
4775
773a2d7c 4776static int be_flash_skyhawk(struct be_adapter *adapter,
748b539a
SP
4777 const struct firmware *fw,
4778 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 4779{
773a2d7c 4780 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
70a7b525 4781 bool crc_match, old_fw_img, flash_offset_support = true;
96c9b2e4 4782 struct device *dev = &adapter->pdev->dev;
773a2d7c 4783 struct flash_section_info *fsec = NULL;
96c9b2e4 4784 u32 img_offset, img_size, img_type;
70a7b525 4785 u16 img_optype, flash_optype;
96c9b2e4 4786 int status, i, filehdr_size;
96c9b2e4 4787 const u8 *p;
773a2d7c
PR
4788
4789 filehdr_size = sizeof(struct flash_file_hdr_g3);
4790 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4791 if (!fsec) {
96c9b2e4 4792 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
56ace3a0 4793 return -EINVAL;
773a2d7c
PR
4794 }
4795
70a7b525 4796retry_flash:
773a2d7c
PR
4797 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4798 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4799 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
96c9b2e4
VV
4800 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4801 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4802 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
773a2d7c 4803
96c9b2e4 4804 if (img_optype == 0xFFFF)
773a2d7c 4805 continue;
70a7b525
VV
4806
4807 if (flash_offset_support)
4808 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4809 else
4810 flash_optype = img_optype;
4811
96c9b2e4
VV
4812 /* Don't bother verifying CRC if an old FW image is being
4813 * flashed
4814 */
4815 if (old_fw_img)
4816 goto flash;
4817
4818 status = be_check_flash_crc(adapter, fw->data, img_offset,
4819 img_size, filehdr_size +
70a7b525 4820 img_hdrs_size, flash_optype,
96c9b2e4 4821 &crc_match);
4c60005f
KA
4822 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4823 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
70a7b525
VV
4824 /* The current FW image on the card does not support
4825 * OFFSET based flashing. Retry using older mechanism
4826 * of OPTYPE based flashing
4827 */
4828 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4829 flash_offset_support = false;
4830 goto retry_flash;
4831 }
4832
4833 /* The current FW image on the card does not recognize
4834 * the new FLASH op_type. The FW download is partially
4835 * complete. Reboot the server now to enable FW image
4836 * to recognize the new FLASH op_type. To complete the
4837 * remaining process, download the same FW again after
4838 * the reboot.
4839 */
96c9b2e4
VV
4840 dev_err(dev, "Flash incomplete. Reset the server\n");
4841 dev_err(dev, "Download FW image again after reset\n");
4842 return -EAGAIN;
4843 } else if (status) {
4844 dev_err(dev, "Could not get CRC for 0x%x region\n",
4845 img_optype);
4846 return -EFAULT;
773a2d7c
PR
4847 }
4848
96c9b2e4
VV
4849 if (crc_match)
4850 continue;
773a2d7c 4851
96c9b2e4
VV
4852flash:
4853 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
773a2d7c
PR
4854 if (p + img_size > fw->data + fw->size)
4855 return -1;
4856
70a7b525
VV
4857 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4858 img_offset);
4859
4860 /* The current FW image on the card does not support OFFSET
4861 * based flashing. Retry using older mechanism of OPTYPE based
4862 * flashing
4863 */
4864 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4865 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4866 flash_offset_support = false;
4867 goto retry_flash;
4868 }
4869
96c9b2e4
VV
4870 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4871 * UFI_DIR region
4872 */
4c60005f
KA
4873 if (old_fw_img &&
4874 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4875 (img_optype == OPTYPE_UFI_DIR &&
4876 base_status(status) == MCC_STATUS_FAILED))) {
96c9b2e4
VV
4877 continue;
4878 } else if (status) {
4879 dev_err(dev, "Flashing section type 0x%x failed\n",
4880 img_type);
4881 return -EFAULT;
773a2d7c
PR
4882 }
4883 }
4884 return 0;
3f0d4560
AK
4885}
4886
485bf569 4887static int lancer_fw_download(struct be_adapter *adapter,
748b539a 4888 const struct firmware *fw)
84517482 4889{
485bf569
SN
4890#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4891#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
bb864e07 4892 struct device *dev = &adapter->pdev->dev;
84517482 4893 struct be_dma_mem flash_cmd;
485bf569
SN
4894 const u8 *data_ptr = NULL;
4895 u8 *dest_image_ptr = NULL;
4896 size_t image_size = 0;
4897 u32 chunk_size = 0;
4898 u32 data_written = 0;
4899 u32 offset = 0;
4900 int status = 0;
4901 u8 add_status = 0;
f67ef7ba 4902 u8 change_status;
84517482 4903
485bf569 4904 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
bb864e07 4905 dev_err(dev, "FW image size should be multiple of 4\n");
3fb8cb80 4906 return -EINVAL;
d9efd2af
SB
4907 }
4908
485bf569
SN
4909 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4910 + LANCER_FW_DOWNLOAD_CHUNK;
e51000db
SB
4911 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
4912 &flash_cmd.dma, GFP_KERNEL);
3fb8cb80
KA
4913 if (!flash_cmd.va)
4914 return -ENOMEM;
84517482 4915
485bf569
SN
4916 dest_image_ptr = flash_cmd.va +
4917 sizeof(struct lancer_cmd_req_write_object);
4918 image_size = fw->size;
4919 data_ptr = fw->data;
4920
4921 while (image_size) {
4922 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4923
4924 /* Copy the image chunk content. */
4925 memcpy(dest_image_ptr, data_ptr, chunk_size);
4926
4927 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4928 chunk_size, offset,
4929 LANCER_FW_DOWNLOAD_LOCATION,
4930 &data_written, &change_status,
4931 &add_status);
485bf569
SN
4932 if (status)
4933 break;
4934
4935 offset += data_written;
4936 data_ptr += data_written;
4937 image_size -= data_written;
4938 }
4939
4940 if (!status) {
4941 /* Commit the FW written */
4942 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
4943 0, offset,
4944 LANCER_FW_DOWNLOAD_LOCATION,
4945 &data_written, &change_status,
4946 &add_status);
485bf569
SN
4947 }
4948
bb864e07 4949 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
485bf569 4950 if (status) {
bb864e07 4951 dev_err(dev, "Firmware load error\n");
3fb8cb80 4952 return be_cmd_status(status);
485bf569
SN
4953 }
4954
bb864e07
KA
4955 dev_info(dev, "Firmware flashed successfully\n");
4956
f67ef7ba 4957 if (change_status == LANCER_FW_RESET_NEEDED) {
bb864e07 4958 dev_info(dev, "Resetting adapter to activate new FW\n");
5c510811
SK
4959 status = lancer_physdev_ctrl(adapter,
4960 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba 4961 if (status) {
bb864e07
KA
4962 dev_err(dev, "Adapter busy, could not reset FW\n");
4963 dev_err(dev, "Reboot server to activate new FW\n");
f67ef7ba
PR
4964 }
4965 } else if (change_status != LANCER_NO_RESET_NEEDED) {
bb864e07 4966 dev_info(dev, "Reboot server to activate new FW\n");
f67ef7ba 4967 }
3fb8cb80
KA
4968
4969 return 0;
485bf569
SN
4970}
4971
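/* Flow summary, for reference: lancer_fw_download() streams the image in
 * 32KB lancer_cmd_write_object() chunks, then issues one final write of
 * length 0 at the end offset to commit the object. A change_status of
 * LANCER_FW_RESET_NEEDED afterwards triggers an in-driver FW reset;
 * otherwise a server reboot activates the new image.
 */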
a6e6ff6e
VV
4972/* Check if the flash image file is compatible with the adapter that
4973 * is being flashed.
4974 */
4975static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4976 struct flash_file_hdr_g3 *fhdr)
773a2d7c 4977{
5d3acd0d
VV
4978 if (!fhdr) {
4979 dev_err(&adapter->pdev->dev, "Invalid FW UFI file\n");
4980 return false;
4981 }
773a2d7c 4982
5d3acd0d
VV
4983 /* First letter of the build version is used to identify
4984 * which chip this image file is meant for.
4985 */
4986 switch (fhdr->build[0]) {
4987 case BLD_STR_UFI_TYPE_SH:
a6e6ff6e
VV
4988 if (!skyhawk_chip(adapter))
4989 return false;
4990 break;
5d3acd0d 4991 case BLD_STR_UFI_TYPE_BE3:
a6e6ff6e
VV
4992 if (!BE3_chip(adapter))
4993 return false;
4994 break;
5d3acd0d 4995 case BLD_STR_UFI_TYPE_BE2:
a6e6ff6e
VV
4996 if (!BE2_chip(adapter))
4997 return false;
4998 break;
5d3acd0d
VV
4999 default:
5000 return false;
5001 }
a6e6ff6e
VV
5002
5003 return (fhdr->asic_type_rev >= adapter->asic_rev);
773a2d7c
PR
5004}
5005
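/* Example, hypothetical header: a UFI whose fhdr->build starts with
 * BLD_STR_UFI_TYPE_SH and whose asic_type_rev >= adapter->asic_rev is
 * accepted on Skyhawk; the same file is rejected on BE3 because the
 * leading build letter does not match the chip.
 */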
485bf569
SN
5006static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
5007{
5d3acd0d 5008 struct device *dev = &adapter->pdev->dev;
485bf569 5009 struct flash_file_hdr_g3 *fhdr3;
5d3acd0d
VV
5010 struct image_hdr *img_hdr_ptr;
5011 int status = 0, i, num_imgs;
485bf569 5012 struct be_dma_mem flash_cmd;
84517482 5013
5d3acd0d
VV
5014 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
5015 if (!be_check_ufi_compatibility(adapter, fhdr3)) {
5016 dev_err(dev, "Flash image is not compatible with adapter\n");
5017 return -EINVAL;
84517482
AK
5018 }
5019
5d3acd0d 5020 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
e51000db
SB
5021 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
5022 GFP_KERNEL);
5d3acd0d
VV
5023 if (!flash_cmd.va)
5024 return -ENOMEM;
773a2d7c 5025
773a2d7c
PR
5026 num_imgs = le32_to_cpu(fhdr3->num_imgs);
5027 for (i = 0; i < num_imgs; i++) {
5028 img_hdr_ptr = (struct image_hdr *)(fw->data +
5029 (sizeof(struct flash_file_hdr_g3) +
5030 i * sizeof(struct image_hdr)));
5d3acd0d
VV
5031 if (!BE2_chip(adapter) &&
5032 le32_to_cpu(img_hdr_ptr->imageid) != 1)
5033 continue;
84517482 5034
5d3acd0d
VV
5035 if (skyhawk_chip(adapter))
5036 status = be_flash_skyhawk(adapter, fw, &flash_cmd,
5037 num_imgs);
5038 else
5039 status = be_flash_BEx(adapter, fw, &flash_cmd,
5040 num_imgs);
84517482
AK
5041 }
5042
5d3acd0d
VV
5043 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
5044 if (!status)
5045 dev_info(dev, "Firmware flashed successfully\n");
84517482 5046
485bf569
SN
5047 return status;
5048}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
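
/* Editor's note (illustrative usage, not part of the original file):
 * be_load_fw() is reached from user space through the driver's ethtool
 * flash hook, so a typical firmware update looks like
 *
 *   # image must be visible to the firmware loader (e.g. /lib/firmware)
 *   ethtool -f eth0 <image>.ufi
 *
 * The netif_running() check above is why the interface must be up first.
 */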

static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
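
/* Editor's note (illustrative usage, not part of the original file): the
 * bridge setlink/getlink hooks here are driven by iproute2, e.g.
 *
 *   bridge link set dev eth0 hwmode vepa    # switch the port to VEPA
 *   bridge link show dev eth0               # getlink reports the mode
 *
 * Only VEPA and VEB are accepted, and only when SR-IOV is enabled.
 */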

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP tunnels
 * generically and doesn't distinguish between the various transport types
 * (VxLAN, GRE, NVGRE, etc.), so once exported, offload is expected to work
 * across all types of IP tunnels. Skyhawk supports offloads for either
 * VxLAN or NVGRE, exclusively. So we export VxLAN offloads in
 * hw_enc_features only when a VxLAN port is added. If other (non-VxLAN)
 * tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through
 * ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}

static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non-tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * so that other tunneled traffic, such as GRE, keeps working while
	 * VxLAN offloads are configured on Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
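
/* Editor's note (explanatory sketch, not part of the original file): the
 * header-geometry test above encodes the canonical VxLAN frame layout:
 *
 *   outer Eth | outer IP | UDP (8B) | VxLAN (8B) | inner Eth | inner IP ...
 *
 * i.e. the gap between the transport header and the inner MAC header must be
 * exactly sizeof(struct udphdr) + sizeof(struct vxlanhdr). Anything else is
 * treated as a non-VxLAN tunnel and has its checksum/GSO features stripped.
 */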
#endif

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}

static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now, error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s\n",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions.
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in the be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;
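
	/* Editor's note (sketch, assuming the MODULO() helper in be.h):
	 * the power-of-2 requirement above exists because MODULO() is
	 * implemented roughly as
	 *
	 *	static inline u32 MODULO(u16 val, u16 limit)
	 *	{
	 *		BUG_ON(limit & (limit - 1));	// not a power of 2
	 *		return val & (limit - 1);	// cheap modulo
	 *	}
	 *
	 * so a non-power-of-2 be_get_temp_freq would trip the BUG_ON the
	 * first time be_worker() checks the die temperature.
	 */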

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
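
/* Editor's note (illustrative usage, not part of the original file): the
 * attribute registered above surfaces through the standard hwmon sysfs
 * interface, e.g. (the hwmon index varies by system):
 *
 *   cat /sys/class/hwmon/hwmon0/temp1_input
 *
 * which prints the die temperature in millidegrees Celsius; -EIO is
 * returned while no valid reading has been collected yet.
 */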

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On-die temperature is not supported on VFs */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while the flash dump is in progress can
	 * prevent it from recovering, so wait for the dump to finish.
	 * Waiting is needed only on the first function, as the dump is
	 * done once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in an SR-IOV capable configuration, the PF-pool
	 * resources are distributed equally across the maximum number of
	 * VFs. The user may request only a subset of that maximum.
	 * Based on num_vfs, redistribute the resources across num_vfs so
	 * that each VF gets a larger share of resources.
	 * This facility is not available in BE3 FW.
	 * Also, on Lancer this is done by the FW.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
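
/* Editor's note (illustrative usage, not part of the original file): this
 * hook is what the PCI core calls when the VF count is changed through
 * sysfs, e.g. (the device address is hypothetical):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # enable 4 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:04:00.0/sriov_numvfs   # disable VFs
 *
 * This is the "sysfs method" referred to by the message printed when the
 * obsolete num_vfs module parameter is used.
 */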

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 0) {
		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
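
/* Editor's note (illustrative usage, not part of the original file): the
 * rx_frag_size check above means only three values are accepted at load
 * time, e.g.:
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * Any other value falls back to the 2048-byte default with a warning.
 */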

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);