drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

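/* The queue helpers below manage the DMA-coherent memory backing a BE
 * ring (q->dma_mem): be_queue_alloc() sizes it as len * entry_size and
 * zeroes it; be_queue_free() returns it to the DMA pool.
 */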
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

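/* Doorbell helpers: each queue type has its own doorbell register within
 * the mapped doorbell BAR (adapter->db). When posting RX/TX entries, the
 * wmb() ahead of the iowrite32() ensures the descriptor writes in host
 * memory are visible to the device before the doorbell rings.
 */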
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

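/* ndo_set_mac_address: the new MAC is programmed (be_cmd_pmac_add) before
 * the currently active one is deleted. On BEx VFs the MAC is owned by the
 * PF, so the request is accepted only if it matches the MAC the PF has
 * already provisioned.
 */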
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

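/* The stats helpers below copy the firmware GET_STATS response into
 * adapter->drv_stats. BE2 returns the v0 layout, BE3/Skyhawk return v1,
 * and Lancer reports per-physical-port (pport) stats instead.
 */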
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
				pport_stats->rx_address_filtered +
				pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

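/* Fold a 16-bit HW counter into a 32-bit accumulator. The low half of
 * *acc mirrors the last HW reading; when the new reading is smaller the
 * counter must have wrapped, so a full 16-bit period is added. E.g.
 * *acc = 0x0001FFFE followed by val = 0x0003 yields 0x00010003 + 0x10000
 * = 0x00020003.
 */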
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

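/* ndo_get_stats64: totals are summed across all RX/TX queues. Each
 * queue's packet/byte counters are read inside a u64_stats seqcount
 * retry loop so that 64-bit values stay consistent on 32-bit hosts.
 */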
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

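/* TX path: a transmit request uses one header WRB plus one WRB per DMA
 * fragment, and (except on Lancer) a dummy WRB when needed to keep the
 * total even, as the BEx TX ring expects an even WRB count per request.
 */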
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

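/* wrb_fill_hdr() builds the per-packet header WRB. AMAP_SET_BITS() writes
 * bit-fields of the little-endian HW structure: LSO settings for GSO skbs,
 * TCP/UDP checksum-offload flags, an optional VLAN tag, and the total WRB
 * count of the request.
 */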
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (skb)
			skb->vlan_tci = 0;
	}

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;

		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

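/* Workaround helpers for a BE3 erratum: requesting HW VLAN tagging for
 * certain ipv6 packets (an extension header whose hdrlen byte is 0xff)
 * can lock up the ASIC, so such packets get their VLAN tag inserted in
 * software instead.
 */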
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;
	bool skip_hw_vlan = false;
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 */
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

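/* ndo_set_rx_mode: programs the promiscuous, multicast and unicast
 * filters. When the requested unicast or multicast list exceeds what the
 * HW filter tables can hold, the code falls back to (multicast-)
 * promiscuous mode.
 */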
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

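/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the measured RX packet rate (rx_pps), clamp it between
 * eqo->min_eqd and eqo->max_eqd, and program it via be_cmd_modify_eqd().
 */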
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

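/* RX buffers are carved out of compound pages: several rx_frag_size
 * fragments share one page, and only the consumer of the last fragment
 * (last_page_user) unmaps the page from the device.
 */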
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

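/* RX completion parsing: BE3-native mode uses the v1 completion layout,
 * otherwise v0 is used. Both are decoded into the common
 * struct be_rx_compl_info so the rest of the RX path is layout-agnostic.
 */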
10ef9ab4
SP
1548static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1549 struct be_rx_compl_info *rxcp)
2e588f84
SP
1550{
1551 rxcp->pkt_size =
1552 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1553 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1554 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1555 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
9ecb42fd 1556 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
2e588f84
SP
1557 rxcp->ip_csum =
1558 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1559 rxcp->l4_csum =
1560 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1561 rxcp->ipv6 =
1562 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1563 rxcp->rxq_idx =
1564 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1565 rxcp->num_rcvd =
1566 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1567 rxcp->pkt_type =
1568 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
4b972914 1569 rxcp->rss_hash =
c297977e 1570 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
15d72184
SP
1571 if (rxcp->vlanf) {
1572 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
3c709f8f
DM
1573 compl);
1574 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1575 compl);
15d72184 1576 }
12004ae9 1577 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
2e588f84
SP
1578}
1579
10ef9ab4
SP
1580static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1581 struct be_rx_compl_info *rxcp)
2e588f84
SP
1582{
1583 rxcp->pkt_size =
1584 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1585 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1586 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1587 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1588 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1589 rxcp->ip_csum =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1591 rxcp->l4_csum =
1592 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1593 rxcp->ipv6 =
1594 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1595 rxcp->rxq_idx =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1597 rxcp->num_rcvd =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1599 rxcp->pkt_type =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1601 rxcp->rss_hash =
c297977e 1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1603 if (rxcp->vlanf) {
1604 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1605 compl);
1606 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1607 compl);
15d72184 1608 }
12004ae9 1609 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1610}
1611
1612static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1613{
1614 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1615 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1616 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1617
2e588f84
SP
 1618 /* For checking the valid bit it is OK to use either definition, as the
 1619 * valid bit is at the same position in both v0 and v1 Rx compl */
1620 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1621 return NULL;
6b7c5b94 1622
2e588f84
SP
1623 rmb();
1624 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1625
2e588f84 1626 if (adapter->be3_native)
10ef9ab4 1627 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1628 else
10ef9ab4 1629 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1630
15d72184
SP
1631 if (rxcp->vlanf) {
 1632 /* vlanf could be wrongly set in some cards.
 1633 * Ignore it if vtm is not set */
752961a1 1634 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1635 rxcp->vlanf = 0;
6b7c5b94 1636
15d72184 1637 if (!lancer_chip(adapter))
3c709f8f 1638 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1639
939cf306 1640 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1641 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1642 rxcp->vlanf = 0;
1643 }
2e588f84
SP
1644
 1645 /* As the compl has been parsed, reset it; we won't touch it again */
1646 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1647
3abcdeda 1648 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1649 return rxcp;
1650}
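
The consumption pattern in be_rx_compl_get() above (test the valid bit, issue a read barrier, parse, zero the valid bit, advance the tail) is the standard discipline for DMA'd completion rings. Below is a minimal user-space sketch of the same ordering, with atomic_thread_fence() standing in for the kernel's rmb(); the entry layout and all names are illustrative, not driver API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define CQ_LEN 4

/* Illustrative completion layout; not the hardware's. */
struct compl_entry {
	uint32_t valid;		/* written last by the producer */
	uint32_t data;
};

static struct compl_entry cq[CQ_LEN];
static unsigned int cq_tail;

/* Return the next completion, or NULL when the ring is empty. */
static struct compl_entry *compl_get(void)
{
	struct compl_entry *e = &cq[cq_tail];

	if (e->valid == 0)
		return NULL;

	/* Order the valid-bit load before payload loads, as rmb() does. */
	atomic_thread_fence(memory_order_acquire);

	e->valid = 0;			/* reset; we won't touch it again */
	cq_tail = (cq_tail + 1) % CQ_LEN;
	return e;
}

int main(void)
{
	struct compl_entry *e;

	cq[0].data = 42;		/* producer fills the payload... */
	atomic_thread_fence(memory_order_release);
	cq[0].valid = 1;		/* ...then publishes the entry */

	while ((e = compl_get()) != NULL)
		printf("compl data=%u\n", e->data);
	return 0;
}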
1651
1829b086 1652static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1653{
6b7c5b94 1654 u32 order = get_order(size);
1829b086 1655
6b7c5b94 1656 if (order > 0)
1829b086
ED
1657 gfp |= __GFP_COMP;
1658 return alloc_pages(gfp, order);
6b7c5b94
SP
1659}
1660
1661/*
 1662 * Allocate a page, split it into fragments of size rx_frag_size and post as
1663 * receive buffers to BE
1664 */
1829b086 1665static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1666{
3abcdeda 1667 struct be_adapter *adapter = rxo->adapter;
26d92f92 1668 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1669 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1670 struct page *pagep = NULL;
1671 struct be_eth_rx_d *rxd;
1672 u64 page_dmaaddr = 0, frag_dmaaddr;
1673 u32 posted, page_offset = 0;
1674
3abcdeda 1675 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1676 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1677 if (!pagep) {
1829b086 1678 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1679 if (unlikely(!pagep)) {
ac124ff9 1680 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1681 break;
1682 }
2b7bcebf
IV
1683 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1684 0, adapter->big_page_size,
1685 DMA_FROM_DEVICE);
6b7c5b94
SP
1686 page_info->page_offset = 0;
1687 } else {
1688 get_page(pagep);
1689 page_info->page_offset = page_offset + rx_frag_size;
1690 }
1691 page_offset = page_info->page_offset;
1692 page_info->page = pagep;
fac6da5b 1693 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1694 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1695
1696 rxd = queue_head_node(rxq);
1697 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1698 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1699
1700 /* Any space left in the current big page for another frag? */
1701 if ((page_offset + rx_frag_size + rx_frag_size) >
1702 adapter->big_page_size) {
1703 pagep = NULL;
1704 page_info->last_page_user = true;
1705 }
26d92f92
SP
1706
1707 prev_page_info = page_info;
1708 queue_head_inc(rxq);
10ef9ab4 1709 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1710 }
1711 if (pagep)
26d92f92 1712 prev_page_info->last_page_user = true;
6b7c5b94
SP
1713
1714 if (posted) {
6b7c5b94 1715 atomic_add(posted, &rxq->used);
8788fdc2 1716 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1717 } else if (atomic_read(&rxq->used) == 0) {
1718 /* Let be_worker replenish when memory is available */
3abcdeda 1719 rxo->rx_post_starved = true;
6b7c5b94 1720 }
6b7c5b94
SP
1721}
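
As a rough sketch of the arithmetic in be_post_rx_frags(): each big page is carved into rx_frag_size slices, one RX descriptor per slice, and the page is retired (its final slice marked as the last user) once it cannot fit another slice. The demo below assumes the 2048-byte default fragment size and a 4 KB big page; the real big-page size is derived from get_order(rx_frag_size).

#include <stdbool.h>
#include <stdio.h>

#define RX_FRAG_SIZE	2048u	/* the module parameter's default */
#define BIG_PAGE_SIZE	4096u	/* assumed size for this sketch */

int main(void)
{
	unsigned int page_offset = 0;
	bool need_new_page = true;

	for (int posted = 0; posted < 6; posted++) {
		if (need_new_page) {
			page_offset = 0;
			need_new_page = false;
			printf("alloc new big page\n");
		} else {
			page_offset += RX_FRAG_SIZE;
		}
		printf("post frag at offset %u\n", page_offset);

		/* Any space left in this big page for another frag? */
		if (page_offset + 2 * RX_FRAG_SIZE > BIG_PAGE_SIZE) {
			printf("  (last user of this page)\n");
			need_new_page = true;
		}
	}
	return 0;
}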
1722
5fb379ee 1723static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1724{
6b7c5b94
SP
1725 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1726
1727 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1728 return NULL;
1729
f3eb62d2 1730 rmb();
6b7c5b94
SP
1731 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1732
1733 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1734
1735 queue_tail_inc(tx_cq);
1736 return txcp;
1737}
1738
3c8def97
SP
1739static u16 be_tx_compl_process(struct be_adapter *adapter,
1740 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1741{
3c8def97 1742 struct be_queue_info *txq = &txo->q;
a73b796e 1743 struct be_eth_wrb *wrb;
3c8def97 1744 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1745 struct sk_buff *sent_skb;
ec43b1a6
SP
1746 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1747 bool unmap_skb_hdr = true;
6b7c5b94 1748
ec43b1a6 1749 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1750 BUG_ON(!sent_skb);
ec43b1a6
SP
1751 sent_skbs[txq->tail] = NULL;
1752
1753 /* skip header wrb */
a73b796e 1754 queue_tail_inc(txq);
6b7c5b94 1755
ec43b1a6 1756 do {
6b7c5b94 1757 cur_index = txq->tail;
a73b796e 1758 wrb = queue_tail_node(txq);
2b7bcebf
IV
1759 unmap_tx_frag(&adapter->pdev->dev, wrb,
1760 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1761 unmap_skb_hdr = false;
1762
6b7c5b94
SP
1763 num_wrbs++;
1764 queue_tail_inc(txq);
ec43b1a6 1765 } while (cur_index != last_index);
6b7c5b94 1766
6b7c5b94 1767 kfree_skb(sent_skb);
4d586b82 1768 return num_wrbs;
6b7c5b94
SP
1769}
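
be_tx_compl_process() frees the header WRB plus every fragment WRB up to the wrb_index reported in the completion, counting them so the queue's used count can be dropped in one atomic_sub(). A sketch of that wrap-around walk, with a toy queue depth of 8 as an assumption:

#include <stdio.h>

#define TXQ_LEN 8u	/* toy depth; real rings are much deeper */

int main(void)
{
	unsigned int tail = 6;		/* points at the header WRB */
	unsigned int last_index = 1;	/* wrb_index from the completion */
	unsigned int cur, num_wrbs = 1;	/* account for hdr wrb */

	tail = (tail + 1) % TXQ_LEN;	/* skip header wrb */
	do {
		cur = tail;
		num_wrbs++;		/* one fragment WRB unmapped */
		tail = (tail + 1) % TXQ_LEN;
	} while (cur != last_index);

	printf("freed %u wrbs, tail now %u\n", num_wrbs, tail);
	return 0;
}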
1770
10ef9ab4
SP
1771/* Return the number of events in the event queue */
1772static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1773{
10ef9ab4
SP
1774 struct be_eq_entry *eqe;
1775 int num = 0;
859b1e4e 1776
10ef9ab4
SP
1777 do {
1778 eqe = queue_tail_node(&eqo->q);
1779 if (eqe->evt == 0)
1780 break;
859b1e4e 1781
10ef9ab4
SP
1782 rmb();
1783 eqe->evt = 0;
1784 num++;
1785 queue_tail_inc(&eqo->q);
1786 } while (true);
1787
1788 return num;
859b1e4e
SP
1789}
1790
10ef9ab4
SP
 1791 /* Leaves the EQ in disarmed state */
1792static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1793{
10ef9ab4 1794 int num = events_get(eqo);
859b1e4e 1795
10ef9ab4 1796 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1797}
1798
10ef9ab4 1799static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1800{
1801 struct be_rx_page_info *page_info;
3abcdeda
SP
1802 struct be_queue_info *rxq = &rxo->q;
1803 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1804 struct be_rx_compl_info *rxcp;
d23e946c
SP
1805 struct be_adapter *adapter = rxo->adapter;
1806 int flush_wait = 0;
6b7c5b94
SP
1807 u16 tail;
1808
d23e946c
SP
1809 /* Consume pending rx completions.
1810 * Wait for the flush completion (identified by zero num_rcvd)
1811 * to arrive. Notify CQ even when there are no more CQ entries
1812 * for HW to flush partially coalesced CQ entries.
1813 * In Lancer, there is no need to wait for flush compl.
1814 */
1815 for (;;) {
1816 rxcp = be_rx_compl_get(rxo);
1817 if (rxcp == NULL) {
1818 if (lancer_chip(adapter))
1819 break;
1820
1821 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1822 dev_warn(&adapter->pdev->dev,
1823 "did not receive flush compl\n");
1824 break;
1825 }
1826 be_cq_notify(adapter, rx_cq->id, true, 0);
1827 mdelay(1);
1828 } else {
1829 be_rx_compl_discard(rxo, rxcp);
1830 be_cq_notify(adapter, rx_cq->id, true, 1);
1831 if (rxcp->num_rcvd == 0)
1832 break;
1833 }
6b7c5b94
SP
1834 }
1835
d23e946c
SP
1836 /* After cleanup, leave the CQ in unarmed state */
1837 be_cq_notify(adapter, rx_cq->id, false, 0);
1838
1839 /* Then free posted rx buffers that were not used */
6b7c5b94 1840 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1841 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1842 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1843 put_page(page_info->page);
1844 memset(page_info, 0, sizeof(*page_info));
1845 }
1846 BUG_ON(atomic_read(&rxq->used));
482c9e79 1847 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1848}
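
The flush-wait above is a bounded poll: retry roughly every 1 ms and give up after ~10 tries so a dead device cannot hang teardown. The same shape in isolation, where check_done() is a hypothetical stand-in for spotting the flush completion:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

/* Hypothetical stand-in for "a flush completion was found". */
static bool check_done(void)
{
	return ++attempts >= 4;
}

static int wait_for_flush(void)
{
	int flush_wait = 0;

	for (;;) {
		if (check_done())
			return 0;
		if (flush_wait++ > 10) {
			fprintf(stderr, "did not receive flush compl\n");
			return -1;
		}
		usleep(1000);	/* user-space cousin of mdelay(1) */
	}
}

int main(void)
{
	printf("flush %s\n", wait_for_flush() ? "timed out" : "done");
	return 0;
}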
1849
0ae57bb3 1850static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1851{
0ae57bb3
SP
1852 struct be_tx_obj *txo;
1853 struct be_queue_info *txq;
a8e9179a 1854 struct be_eth_tx_compl *txcp;
4d586b82 1855 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1856 struct sk_buff *sent_skb;
1857 bool dummy_wrb;
0ae57bb3 1858 int i, pending_txqs;
a8e9179a
SP
1859
1860 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1861 do {
0ae57bb3
SP
1862 pending_txqs = adapter->num_tx_qs;
1863
1864 for_all_tx_queues(adapter, txo, i) {
1865 txq = &txo->q;
1866 while ((txcp = be_tx_compl_get(&txo->cq))) {
1867 end_idx =
1868 AMAP_GET_BITS(struct amap_eth_tx_compl,
1869 wrb_index, txcp);
1870 num_wrbs += be_tx_compl_process(adapter, txo,
1871 end_idx);
1872 cmpl++;
1873 }
1874 if (cmpl) {
1875 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1876 atomic_sub(num_wrbs, &txq->used);
1877 cmpl = 0;
1878 num_wrbs = 0;
1879 }
1880 if (atomic_read(&txq->used) == 0)
1881 pending_txqs--;
a8e9179a
SP
1882 }
1883
0ae57bb3 1884 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1885 break;
1886
1887 mdelay(1);
1888 } while (true);
1889
0ae57bb3
SP
1890 for_all_tx_queues(adapter, txo, i) {
1891 txq = &txo->q;
1892 if (atomic_read(&txq->used))
1893 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1894 atomic_read(&txq->used));
1895
1896 /* free posted tx for which compls will never arrive */
1897 while (atomic_read(&txq->used)) {
1898 sent_skb = txo->sent_skb_list[txq->tail];
1899 end_idx = txq->tail;
1900 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1901 &dummy_wrb);
1902 index_adv(&end_idx, num_wrbs - 1, txq->len);
1903 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1904 atomic_sub(num_wrbs, &txq->used);
1905 }
b03388d6 1906 }
6b7c5b94
SP
1907}
1908
10ef9ab4
SP
1909static void be_evt_queues_destroy(struct be_adapter *adapter)
1910{
1911 struct be_eq_obj *eqo;
1912 int i;
1913
1914 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1915 if (eqo->q.created) {
1916 be_eq_clean(eqo);
10ef9ab4 1917 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1918 }
10ef9ab4
SP
1919 be_queue_free(adapter, &eqo->q);
1920 }
1921}
1922
1923static int be_evt_queues_create(struct be_adapter *adapter)
1924{
1925 struct be_queue_info *eq;
1926 struct be_eq_obj *eqo;
1927 int i, rc;
1928
1929 adapter->num_evt_qs = num_irqs(adapter);
1930
1931 for_all_evt_queues(adapter, eqo, i) {
1932 eqo->adapter = adapter;
1933 eqo->tx_budget = BE_TX_BUDGET;
1934 eqo->idx = i;
1935 eqo->max_eqd = BE_MAX_EQD;
1936 eqo->enable_aic = true;
1937
1938 eq = &eqo->q;
1939 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1940 sizeof(struct be_eq_entry));
1941 if (rc)
1942 return rc;
1943
1944 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1945 if (rc)
1946 return rc;
1947 }
1cfafab9 1948 return 0;
10ef9ab4
SP
1949}
1950
5fb379ee
SP
1951static void be_mcc_queues_destroy(struct be_adapter *adapter)
1952{
1953 struct be_queue_info *q;
5fb379ee 1954
8788fdc2 1955 q = &adapter->mcc_obj.q;
5fb379ee 1956 if (q->created)
8788fdc2 1957 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1958 be_queue_free(adapter, q);
1959
8788fdc2 1960 q = &adapter->mcc_obj.cq;
5fb379ee 1961 if (q->created)
8788fdc2 1962 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1963 be_queue_free(adapter, q);
1964}
1965
1966/* Must be called only after TX qs are created as MCC shares TX EQ */
1967static int be_mcc_queues_create(struct be_adapter *adapter)
1968{
1969 struct be_queue_info *q, *cq;
5fb379ee 1970
8788fdc2 1971 cq = &adapter->mcc_obj.cq;
5fb379ee 1972 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1973 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1974 goto err;
1975
10ef9ab4
SP
1976 /* Use the default EQ for MCC completions */
1977 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1978 goto mcc_cq_free;
1979
8788fdc2 1980 q = &adapter->mcc_obj.q;
5fb379ee
SP
1981 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1982 goto mcc_cq_destroy;
1983
8788fdc2 1984 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1985 goto mcc_q_free;
1986
1987 return 0;
1988
1989mcc_q_free:
1990 be_queue_free(adapter, q);
1991mcc_cq_destroy:
8788fdc2 1992 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1993mcc_cq_free:
1994 be_queue_free(adapter, cq);
1995err:
1996 return -1;
1997}
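
be_mcc_queues_create() uses the usual kernel unwind idiom: each failure jumps to a label that frees exactly what was already allocated, in reverse order. A compact user-space sketch of the idiom, with malloc() standing in for be_queue_alloc():

#include <stdio.h>
#include <stdlib.h>

struct queues {
	void *cq;
	void *q;
};

static int queues_create(struct queues *qs)
{
	qs->cq = malloc(64);		/* be_queue_alloc(cq) stand-in */
	if (!qs->cq)
		goto err;

	qs->q = malloc(64);		/* be_queue_alloc(q) stand-in */
	if (!qs->q)
		goto cq_free;

	return 0;			/* success: caller owns both */

cq_free:
	free(qs->cq);			/* unwind in reverse order */
err:
	return -1;
}

int main(void)
{
	struct queues qs;

	if (queues_create(&qs) == 0) {
		puts("queues created");
		free(qs.q);
		free(qs.cq);
	}
	return 0;
}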
1998
6b7c5b94
SP
1999static void be_tx_queues_destroy(struct be_adapter *adapter)
2000{
2001 struct be_queue_info *q;
3c8def97
SP
2002 struct be_tx_obj *txo;
2003 u8 i;
6b7c5b94 2004
3c8def97
SP
2005 for_all_tx_queues(adapter, txo, i) {
2006 q = &txo->q;
2007 if (q->created)
2008 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2009 be_queue_free(adapter, q);
6b7c5b94 2010
3c8def97
SP
2011 q = &txo->cq;
2012 if (q->created)
2013 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2014 be_queue_free(adapter, q);
2015 }
6b7c5b94
SP
2016}
2017
dafc0fe3
SP
2018static int be_num_txqs_want(struct be_adapter *adapter)
2019{
abb93951
PR
2020 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2021 be_is_mc(adapter) ||
2022 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 2023 BE2_chip(adapter))
dafc0fe3
SP
2024 return 1;
2025 else
abb93951 2026 return adapter->max_tx_queues;
dafc0fe3
SP
2027}
2028
10ef9ab4 2029static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2030{
10ef9ab4
SP
2031 struct be_queue_info *cq, *eq;
2032 int status;
3c8def97
SP
2033 struct be_tx_obj *txo;
2034 u8 i;
6b7c5b94 2035
dafc0fe3 2036 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
2037 if (adapter->num_tx_qs != MAX_TX_QS) {
2038 rtnl_lock();
dafc0fe3
SP
2039 netif_set_real_num_tx_queues(adapter->netdev,
2040 adapter->num_tx_qs);
3bb62f4f
PR
2041 rtnl_unlock();
2042 }
dafc0fe3 2043
10ef9ab4
SP
2044 for_all_tx_queues(adapter, txo, i) {
2045 cq = &txo->cq;
2046 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2047 sizeof(struct be_eth_tx_compl));
2048 if (status)
2049 return status;
3c8def97 2050
10ef9ab4
SP
2051 /* If num_evt_qs is less than num_tx_qs, then more than
2052 * one txq share an eq
2053 */
2054 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2055 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2056 if (status)
2057 return status;
2058 }
2059 return 0;
2060}
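
With more TX queues than event queues, the i % adapter->num_evt_qs expression above spreads the completion queues round-robin over the EQs. A tiny illustration, assuming 8 TX queues and 4 EQs:

#include <stdio.h>

int main(void)
{
	const int num_tx_qs = 8, num_evt_qs = 4;	/* assumed counts */

	for (int i = 0; i < num_tx_qs; i++)
		printf("txq %d -> eq %d\n", i, i % num_evt_qs);
	return 0;
}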
6b7c5b94 2061
10ef9ab4
SP
2062static int be_tx_qs_create(struct be_adapter *adapter)
2063{
2064 struct be_tx_obj *txo;
2065 int i, status;
fe6d2a38 2066
3c8def97 2067 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
2068 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2069 sizeof(struct be_eth_wrb));
2070 if (status)
2071 return status;
6b7c5b94 2072
94d73aaa 2073 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2074 if (status)
2075 return status;
3c8def97 2076 }
6b7c5b94 2077
d379142b
SP
2078 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2079 adapter->num_tx_qs);
10ef9ab4 2080 return 0;
6b7c5b94
SP
2081}
2082
10ef9ab4 2083static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2084{
2085 struct be_queue_info *q;
3abcdeda
SP
2086 struct be_rx_obj *rxo;
2087 int i;
2088
2089 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2090 q = &rxo->cq;
2091 if (q->created)
2092 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2093 be_queue_free(adapter, q);
ac6a0c4a
SP
2094 }
2095}
2096
10ef9ab4 2097static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2098{
10ef9ab4 2099 struct be_queue_info *eq, *cq;
3abcdeda
SP
2100 struct be_rx_obj *rxo;
2101 int rc, i;
6b7c5b94 2102
10ef9ab4
SP
2103 /* We'll create as many RSS rings as there are irqs.
2104 * But when there's only one irq there's no use creating RSS rings
2105 */
2106 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2107 num_irqs(adapter) + 1 : 1;
7f640062
SP
2108 if (adapter->num_rx_qs != MAX_RX_QS) {
2109 rtnl_lock();
2110 netif_set_real_num_rx_queues(adapter->netdev,
2111 adapter->num_rx_qs);
2112 rtnl_unlock();
2113 }
ac6a0c4a 2114
6b7c5b94 2115 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2116 for_all_rx_queues(adapter, rxo, i) {
2117 rxo->adapter = adapter;
3abcdeda
SP
2118 cq = &rxo->cq;
2119 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2120 sizeof(struct be_eth_rx_compl));
2121 if (rc)
10ef9ab4 2122 return rc;
3abcdeda 2123
10ef9ab4
SP
2124 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2125 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2126 if (rc)
10ef9ab4 2127 return rc;
3abcdeda 2128 }
6b7c5b94 2129
d379142b
SP
2130 dev_info(&adapter->pdev->dev,
2131 "created %d RSS queue(s) and 1 default RX queue\n",
2132 adapter->num_rx_qs - 1);
10ef9ab4 2133 return 0;
b628bde2
SP
2134}
2135
6b7c5b94
SP
2136static irqreturn_t be_intx(int irq, void *dev)
2137{
e49cc34f
SP
2138 struct be_eq_obj *eqo = dev;
2139 struct be_adapter *adapter = eqo->adapter;
2140 int num_evts = 0;
6b7c5b94 2141
d0b9cec3
SP
2142 /* IRQ is not expected when NAPI is scheduled as the EQ
2143 * will not be armed.
2144 * But, this can happen on Lancer INTx where it takes
 2145 * a while to de-assert INTx or in BE2 where occasionally
2146 * an interrupt may be raised even when EQ is unarmed.
2147 * If NAPI is already scheduled, then counting & notifying
2148 * events will orphan them.
e49cc34f 2149 */
d0b9cec3 2150 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2151 num_evts = events_get(eqo);
d0b9cec3
SP
2152 __napi_schedule(&eqo->napi);
2153 if (num_evts)
2154 eqo->spurious_intr = 0;
2155 }
2156 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2157
d0b9cec3
SP
 2158 /* Return IRQ_HANDLED only for the first spurious intr
2159 * after a valid intr to stop the kernel from branding
2160 * this irq as a bad one!
e49cc34f 2161 */
d0b9cec3
SP
2162 if (num_evts || eqo->spurious_intr++ == 0)
2163 return IRQ_HANDLED;
2164 else
2165 return IRQ_NONE;
6b7c5b94
SP
2166}
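
The spurious-interrupt accounting in be_intx() claims the first event-less interrupt after a valid one and reports IRQ_NONE for repeats, which keeps the kernel from branding the line as bad. A simplified sketch of just that decision (the NAPI scheduling is omitted; names are illustrative):

#include <stdio.h>

static int spurious_intr;

/* Claim the first spurious interrupt after a valid one; reject repeats. */
static const char *intx(int num_evts)
{
	if (num_evts)
		spurious_intr = 0;
	if (num_evts || spurious_intr++ == 0)
		return "IRQ_HANDLED";
	return "IRQ_NONE";
}

int main(void)
{
	printf("%s\n", intx(3));	/* valid intr     -> IRQ_HANDLED */
	printf("%s\n", intx(0));	/* first spurious -> IRQ_HANDLED */
	printf("%s\n", intx(0));	/* repeat         -> IRQ_NONE */
	return 0;
}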
2167
10ef9ab4 2168static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2169{
10ef9ab4 2170 struct be_eq_obj *eqo = dev;
6b7c5b94 2171
0b545a62
SP
2172 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2173 napi_schedule(&eqo->napi);
6b7c5b94
SP
2174 return IRQ_HANDLED;
2175}
2176
2e588f84 2177static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2178{
2e588f84 2179 return (rxcp->tcpf && !rxcp->err) ? true : false;
6b7c5b94
SP
2180}
2181
10ef9ab4
SP
2182static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2183 int budget)
6b7c5b94 2184{
3abcdeda
SP
2185 struct be_adapter *adapter = rxo->adapter;
2186 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2187 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2188 u32 work_done;
2189
2190 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2191 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2192 if (!rxcp)
2193 break;
2194
12004ae9
SP
 2195 /* Is it a flush compl that has no data? */
2196 if (unlikely(rxcp->num_rcvd == 0))
2197 goto loop_continue;
2198
2199 /* Discard compl with partial DMA Lancer B0 */
2200 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2201 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2202 goto loop_continue;
2203 }
2204
2205 /* On BE drop pkts that arrive due to imperfect filtering in
 2206 * promiscuous mode on some SKUs
2207 */
2208 if (unlikely(rxcp->port != adapter->port_num &&
2209 !lancer_chip(adapter))) {
10ef9ab4 2210 be_rx_compl_discard(rxo, rxcp);
12004ae9 2211 goto loop_continue;
64642811 2212 }
009dd872 2213
12004ae9 2214 if (do_gro(rxcp))
10ef9ab4 2215 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2216 else
10ef9ab4 2217 be_rx_compl_process(rxo, rxcp);
12004ae9 2218loop_continue:
2e588f84 2219 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2220 }
2221
10ef9ab4
SP
2222 if (work_done) {
2223 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2224
10ef9ab4
SP
2225 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2226 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2227 }
10ef9ab4 2228
6b7c5b94
SP
2229 return work_done;
2230}
2231
10ef9ab4
SP
2232static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2233 int budget, int idx)
6b7c5b94 2234{
6b7c5b94 2235 struct be_eth_tx_compl *txcp;
10ef9ab4 2236 int num_wrbs = 0, work_done;
3c8def97 2237
10ef9ab4
SP
2238 for (work_done = 0; work_done < budget; work_done++) {
2239 txcp = be_tx_compl_get(&txo->cq);
2240 if (!txcp)
2241 break;
2242 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2243 AMAP_GET_BITS(struct amap_eth_tx_compl,
2244 wrb_index, txcp));
10ef9ab4 2245 }
6b7c5b94 2246
10ef9ab4
SP
2247 if (work_done) {
2248 be_cq_notify(adapter, txo->cq.id, true, work_done);
2249 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2250
10ef9ab4
SP
2251 /* As Tx wrbs have been freed up, wake up netdev queue
2252 * if it was stopped due to lack of tx wrbs. */
2253 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2254 atomic_read(&txo->q.used) < txo->q.len / 2) {
2255 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2256 }
10ef9ab4
SP
2257
2258 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2259 tx_stats(txo)->tx_compl += work_done;
2260 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2261 }
10ef9ab4
SP
2262 return (work_done < budget); /* Done */
2263}
6b7c5b94 2264
10ef9ab4
SP
2265int be_poll(struct napi_struct *napi, int budget)
2266{
2267 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2268 struct be_adapter *adapter = eqo->adapter;
0b545a62 2269 int max_work = 0, work, i, num_evts;
10ef9ab4 2270 bool tx_done;
f31e50a8 2271
0b545a62
SP
2272 num_evts = events_get(eqo);
2273
10ef9ab4
SP
2274 /* Process all TXQs serviced by this EQ */
2275 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2276 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2277 eqo->tx_budget, i);
2278 if (!tx_done)
2279 max_work = budget;
f31e50a8
SP
2280 }
2281
10ef9ab4
SP
2282 /* This loop will iterate twice for EQ0 in which
 2283 * completions of the last RXQ (default one) are also processed.
 2284 * For other EQs the loop iterates only once.
2285 */
2286 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2287 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2288 max_work = max(work, max_work);
2289 }
6b7c5b94 2290
10ef9ab4
SP
2291 if (is_mcc_eqo(eqo))
2292 be_process_mcc(adapter);
93c86700 2293
10ef9ab4
SP
2294 if (max_work < budget) {
2295 napi_complete(napi);
0b545a62 2296 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2297 } else {
2298 /* As we'll continue in polling mode, count and clear events */
0b545a62 2299 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2300 }
10ef9ab4 2301 return max_work;
6b7c5b94
SP
2302}
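
be_poll() follows the NAPI contract: if every ring finished under budget, complete NAPI and re-arm the EQ; otherwise report the full budget so the kernel keeps polling. A sketch of that contract with made-up per-ring work counts:

#include <stdio.h>

int main(void)
{
	const int budget = 64;
	const int ring_work[] = { 10, 64, 3 };	/* made-up counts */
	int max_work = 0;

	for (unsigned int i = 0; i < sizeof(ring_work) / sizeof(*ring_work); i++)
		if (ring_work[i] > max_work)
			max_work = ring_work[i];

	if (max_work < budget)
		printf("napi_complete + re-arm EQ\n");
	else
		printf("stay in polling mode\n");
	return 0;
}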
2303
f67ef7ba 2304void be_detect_error(struct be_adapter *adapter)
7c185276 2305{
e1cfb67a
PR
2306 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2307 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2308 u32 i;
2309
d23e946c 2310 if (be_hw_error(adapter))
72f02485
SP
2311 return;
2312
e1cfb67a
PR
2313 if (lancer_chip(adapter)) {
2314 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2315 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2316 sliport_err1 = ioread32(adapter->db +
2317 SLIPORT_ERROR1_OFFSET);
2318 sliport_err2 = ioread32(adapter->db +
2319 SLIPORT_ERROR2_OFFSET);
2320 }
2321 } else {
2322 pci_read_config_dword(adapter->pdev,
2323 PCICFG_UE_STATUS_LOW, &ue_lo);
2324 pci_read_config_dword(adapter->pdev,
2325 PCICFG_UE_STATUS_HIGH, &ue_hi);
2326 pci_read_config_dword(adapter->pdev,
2327 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2330
f67ef7ba
PR
2331 ue_lo = (ue_lo & ~ue_lo_mask);
2332 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2333 }
7c185276 2334
1451ae6e
AK
2335 /* On certain platforms BE hardware can indicate spurious UEs.
2336 * Allow the h/w to stop working completely in case of a real UE.
 2337 * Hence hw_error is not set for UE detection.
2338 */
2339 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2340 adapter->hw_error = true;
434b3648 2341 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2342 "Error detected in the card\n");
2343 }
2344
2345 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2346 dev_err(&adapter->pdev->dev,
2347 "ERR: sliport status 0x%x\n", sliport_status);
2348 dev_err(&adapter->pdev->dev,
2349 "ERR: sliport error1 0x%x\n", sliport_err1);
2350 dev_err(&adapter->pdev->dev,
2351 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2352 }
2353
e1cfb67a
PR
2354 if (ue_lo) {
2355 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2356 if (ue_lo & 1)
7c185276
AK
2357 dev_err(&adapter->pdev->dev,
2358 "UE: %s bit set\n", ue_status_low_desc[i]);
2359 }
2360 }
f67ef7ba 2361
e1cfb67a
PR
2362 if (ue_hi) {
2363 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2364 if (ue_hi & 1)
7c185276
AK
2365 dev_err(&adapter->pdev->dev,
2366 "UE: %s bit set\n", ue_status_hi_desc[i]);
2367 }
2368 }
2369
2370}
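
The UE decode above masks off bits the platform flags as expected, then shifts through the remaining word, naming each set bit. The same walk in isolation, with a shortened name table as an assumption (the driver's tables have 32 entries per register):

#include <stdint.h>
#include <stdio.h>

static const char * const ue_desc[] = { "CEV", "CTX", "DBUF", "ERX" };

int main(void)
{
	uint32_t ue = 0x5;	/* pretend UE status */
	uint32_t mask = 0x4;	/* bits the platform flags as expected */

	ue &= ~mask;
	for (unsigned int i = 0; ue; ue >>= 1, i++)
		if (ue & 1)
			printf("UE: %s bit set\n", ue_desc[i]);
	return 0;
}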
2371
8d56ff11
SP
2372static void be_msix_disable(struct be_adapter *adapter)
2373{
ac6a0c4a 2374 if (msix_enabled(adapter)) {
8d56ff11 2375 pci_disable_msix(adapter->pdev);
ac6a0c4a 2376 adapter->num_msix_vec = 0;
3abcdeda
SP
2377 }
2378}
2379
10ef9ab4
SP
2380static uint be_num_rss_want(struct be_adapter *adapter)
2381{
30e80b55 2382 u32 num = 0;
abb93951 2383
10ef9ab4 2384 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2385 (lancer_chip(adapter) ||
2386 (!sriov_want(adapter) && be_physfn(adapter)))) {
2387 num = adapter->max_rss_queues;
30e80b55
YM
2388 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2389 }
2390 return num;
10ef9ab4
SP
2391}
2392
c2bba3df 2393static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2394{
10ef9ab4 2395#define BE_MIN_MSIX_VECTORS 1
045508a8 2396 int i, status, num_vec, num_roce_vec = 0;
d379142b 2397 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2398
10ef9ab4
SP
2399 /* If RSS queues are not used, need a vec for default RX Q */
2400 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2401 if (be_roce_supported(adapter)) {
2402 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2403 (num_online_cpus() + 1));
2404 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2405 num_vec += num_roce_vec;
2406 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2407 }
10ef9ab4 2408 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2409
ac6a0c4a 2410 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2411 adapter->msix_entries[i].entry = i;
2412
ac6a0c4a 2413 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2414 if (status == 0) {
2415 goto done;
2416 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2417 num_vec = status;
c2bba3df
SK
2418 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2419 num_vec);
2420 if (!status)
3abcdeda 2421 goto done;
3abcdeda 2422 }
d379142b
SP
2423
2424 dev_warn(dev, "MSIx enable failed\n");
c2bba3df
SK
2425 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2426 if (!be_physfn(adapter))
2427 return status;
2428 return 0;
3abcdeda 2429done:
045508a8
PP
2430 if (be_roce_supported(adapter)) {
2431 if (num_vec > num_roce_vec) {
2432 adapter->num_msix_vec = num_vec - num_roce_vec;
2433 adapter->num_msix_roce_vec =
2434 num_vec - adapter->num_msix_vec;
2435 } else {
2436 adapter->num_msix_vec = num_vec;
2437 adapter->num_msix_roce_vec = 0;
2438 }
2439 } else
2440 adapter->num_msix_vec = num_vec;
d379142b 2441 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
c2bba3df 2442 return 0;
6b7c5b94
SP
2443}
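
The enable path relies on the old pci_enable_msix() contract: a positive return value means the request failed but that many vectors could be allocated, so the driver retries once with the smaller count. A sketch of the retry, with a fake enable function in place of the PCI call:

#include <stdio.h>

/* Fake: grant at most 4 vectors, mimicking the old pci_enable_msix()
 * contract (0 = success, positive = how many could be allocated). */
static int fake_enable_msix(int requested)
{
	const int available = 4;

	return requested <= available ? 0 : available;
}

int main(void)
{
	int num_vec = 9, status;

	status = fake_enable_msix(num_vec);
	if (status > 0) {		/* partial grant: retry once, smaller */
		num_vec = status;
		status = fake_enable_msix(num_vec);
	}
	if (status)
		printf("MSIx enable failed\n");
	else
		printf("enabled %d MSI-x vector(s)\n", num_vec);
	return 0;
}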
2444
fe6d2a38 2445static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2446 struct be_eq_obj *eqo)
b628bde2 2447{
10ef9ab4 2448 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2449}
6b7c5b94 2450
b628bde2
SP
2451static int be_msix_register(struct be_adapter *adapter)
2452{
10ef9ab4
SP
2453 struct net_device *netdev = adapter->netdev;
2454 struct be_eq_obj *eqo;
2455 int status, i, vec;
6b7c5b94 2456
10ef9ab4
SP
2457 for_all_evt_queues(adapter, eqo, i) {
2458 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2459 vec = be_msix_vec_get(adapter, eqo);
2460 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2461 if (status)
2462 goto err_msix;
2463 }
b628bde2 2464
6b7c5b94 2465 return 0;
3abcdeda 2466err_msix:
10ef9ab4
SP
2467 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2468 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2469 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2470 status);
ac6a0c4a 2471 be_msix_disable(adapter);
6b7c5b94
SP
2472 return status;
2473}
2474
2475static int be_irq_register(struct be_adapter *adapter)
2476{
2477 struct net_device *netdev = adapter->netdev;
2478 int status;
2479
ac6a0c4a 2480 if (msix_enabled(adapter)) {
6b7c5b94
SP
2481 status = be_msix_register(adapter);
2482 if (status == 0)
2483 goto done;
ba343c77
SB
2484 /* INTx is not supported for VF */
2485 if (!be_physfn(adapter))
2486 return status;
6b7c5b94
SP
2487 }
2488
e49cc34f 2489 /* INTx: only the first EQ is used */
6b7c5b94
SP
2490 netdev->irq = adapter->pdev->irq;
2491 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2492 &adapter->eq_obj[0]);
6b7c5b94
SP
2493 if (status) {
2494 dev_err(&adapter->pdev->dev,
2495 "INTx request IRQ failed - err %d\n", status);
2496 return status;
2497 }
2498done:
2499 adapter->isr_registered = true;
2500 return 0;
2501}
2502
2503static void be_irq_unregister(struct be_adapter *adapter)
2504{
2505 struct net_device *netdev = adapter->netdev;
10ef9ab4 2506 struct be_eq_obj *eqo;
3abcdeda 2507 int i;
6b7c5b94
SP
2508
2509 if (!adapter->isr_registered)
2510 return;
2511
2512 /* INTx */
ac6a0c4a 2513 if (!msix_enabled(adapter)) {
e49cc34f 2514 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2515 goto done;
2516 }
2517
2518 /* MSIx */
10ef9ab4
SP
2519 for_all_evt_queues(adapter, eqo, i)
2520 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2521
6b7c5b94
SP
2522done:
2523 adapter->isr_registered = false;
6b7c5b94
SP
2524}
2525
10ef9ab4 2526static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2527{
2528 struct be_queue_info *q;
2529 struct be_rx_obj *rxo;
2530 int i;
2531
2532 for_all_rx_queues(adapter, rxo, i) {
2533 q = &rxo->q;
2534 if (q->created) {
2535 be_cmd_rxq_destroy(adapter, q);
2536 /* After the rxq is invalidated, wait for a grace time
2537 * of 1ms for all dma to end and the flush compl to
2538 * arrive
2539 */
2540 mdelay(1);
10ef9ab4 2541 be_rx_cq_clean(rxo);
482c9e79 2542 }
10ef9ab4 2543 be_queue_free(adapter, q);
482c9e79
SP
2544 }
2545}
2546
889cd4b2
SP
2547static int be_close(struct net_device *netdev)
2548{
2549 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2550 struct be_eq_obj *eqo;
2551 int i;
889cd4b2 2552
045508a8
PP
2553 be_roce_dev_close(adapter);
2554
04d3d624
SK
2555 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2556 for_all_evt_queues(adapter, eqo, i)
2557 napi_disable(&eqo->napi);
2558 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2559 }
a323d9bf
SP
2560
2561 be_async_mcc_disable(adapter);
2562
2563 /* Wait for all pending tx completions to arrive so that
2564 * all tx skbs are freed.
2565 */
2566 be_tx_compl_clean(adapter);
2567
2568 be_rx_qs_destroy(adapter);
2569
2570 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2571 if (msix_enabled(adapter))
2572 synchronize_irq(be_msix_vec_get(adapter, eqo));
2573 else
2574 synchronize_irq(netdev->irq);
2575 be_eq_clean(eqo);
63fcb27f
PR
2576 }
2577
889cd4b2
SP
2578 be_irq_unregister(adapter);
2579
482c9e79
SP
2580 return 0;
2581}
2582
10ef9ab4 2583static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2584{
2585 struct be_rx_obj *rxo;
e9008ee9
PR
2586 int rc, i, j;
2587 u8 rsstable[128];
482c9e79
SP
2588
2589 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2590 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2591 sizeof(struct be_eth_rx_d));
2592 if (rc)
2593 return rc;
2594 }
2595
2596 /* The FW would like the default RXQ to be created first */
2597 rxo = default_rxo(adapter);
2598 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2599 adapter->if_handle, false, &rxo->rss_id);
2600 if (rc)
2601 return rc;
2602
2603 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2604 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2605 rx_frag_size, adapter->if_handle,
2606 true, &rxo->rss_id);
482c9e79
SP
2607 if (rc)
2608 return rc;
2609 }
2610
2611 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2612 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2613 for_all_rss_queues(adapter, rxo, i) {
2614 if ((j + i) >= 128)
2615 break;
2616 rsstable[j + i] = rxo->rss_id;
2617 }
2618 }
594ad54a
SR
2619 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2620 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2621
2622 if (!BEx_chip(adapter))
2623 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2624 RSS_ENABLE_UDP_IPV6;
2625
2626 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2627 128);
2628 if (rc) {
2629 adapter->rss_flags = 0;
482c9e79 2630 return rc;
594ad54a 2631 }
482c9e79
SP
2632 }
2633
2634 /* First time posting */
10ef9ab4 2635 for_all_rx_queues(adapter, rxo, i)
482c9e79 2636 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2637 return 0;
2638}
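
The RSS block above fills a 128-entry indirection table by striding the RSS rings across it, so hash buckets land on the rings evenly. The fill in isolation, assuming 3 RSS rings whose IDs equal their index:

#include <stdio.h>

int main(void)
{
	unsigned char rsstable[128];
	const int num_rss_qs = 3;	/* num_rx_qs - 1 in the driver */

	for (int j = 0; j < 128; j += num_rss_qs)
		for (int i = 0; i < num_rss_qs; i++) {
			if (j + i >= 128)
				break;
			rsstable[j + i] = (unsigned char)i; /* rss_id stand-in */
		}

	for (int k = 0; k < 8; k++)	/* show the first few entries */
		printf("rsstable[%d] = %u\n", k, rsstable[k]);
	return 0;
}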
2639
6b7c5b94
SP
2640static int be_open(struct net_device *netdev)
2641{
2642 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2643 struct be_eq_obj *eqo;
3abcdeda 2644 struct be_rx_obj *rxo;
10ef9ab4 2645 struct be_tx_obj *txo;
b236916a 2646 u8 link_status;
3abcdeda 2647 int status, i;
5fb379ee 2648
10ef9ab4 2649 status = be_rx_qs_create(adapter);
482c9e79
SP
2650 if (status)
2651 goto err;
2652
c2bba3df
SK
2653 status = be_irq_register(adapter);
2654 if (status)
2655 goto err;
5fb379ee 2656
10ef9ab4 2657 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2658 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2659
10ef9ab4
SP
2660 for_all_tx_queues(adapter, txo, i)
2661 be_cq_notify(adapter, txo->cq.id, true, 0);
2662
7a1e9b20
SP
2663 be_async_mcc_enable(adapter);
2664
10ef9ab4
SP
2665 for_all_evt_queues(adapter, eqo, i) {
2666 napi_enable(&eqo->napi);
2667 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2668 }
04d3d624 2669 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2670
323ff71e 2671 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2672 if (!status)
2673 be_link_status_update(adapter, link_status);
2674
045508a8 2675 be_roce_dev_open(adapter);
889cd4b2
SP
2676 return 0;
2677err:
2678 be_close(adapter->netdev);
2679 return -EIO;
5fb379ee
SP
2680}
2681
71d8d1b5
AK
2682static int be_setup_wol(struct be_adapter *adapter, bool enable)
2683{
2684 struct be_dma_mem cmd;
2685 int status = 0;
2686 u8 mac[ETH_ALEN];
2687
2688 memset(mac, 0, ETH_ALEN);
2689
2690 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2691 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2692 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2693 if (cmd.va == NULL)
2694 return -1;
71d8d1b5
AK
2695
2696 if (enable) {
2697 status = pci_write_config_dword(adapter->pdev,
2698 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2699 if (status) {
2700 dev_err(&adapter->pdev->dev,
2381a55c 2701 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2702 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2703 cmd.dma);
71d8d1b5
AK
2704 return status;
2705 }
2706 status = be_cmd_enable_magic_wol(adapter,
2707 adapter->netdev->dev_addr, &cmd);
2708 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2709 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2710 } else {
2711 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2712 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2713 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2714 }
2715
2b7bcebf 2716 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2717 return status;
2718}
2719
6d87f5c3
AK
2720/*
 2721 * Generate a seed MAC address from the PF MAC address using jhash.
 2722 * MAC addresses for VFs are assigned incrementally starting from the seed.
2723 * These addresses are programmed in the ASIC by the PF and the VF driver
2724 * queries for the MAC address during its probe.
2725 */
4c876616 2726static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2727{
f9449ab7 2728 u32 vf;
3abcdeda 2729 int status = 0;
6d87f5c3 2730 u8 mac[ETH_ALEN];
11ac75ed 2731 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2732
2733 be_vf_eth_addr_generate(adapter, mac);
2734
11ac75ed 2735 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2736 if (lancer_chip(adapter)) {
2737 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2738 } else {
2739 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2740 vf_cfg->if_handle,
2741 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2742 }
2743
6d87f5c3
AK
2744 if (status)
2745 dev_err(&adapter->pdev->dev,
590c391d 2746 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2747 else
11ac75ed 2748 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2749
2750 mac[5] += 1;
2751 }
2752 return status;
2753}
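
VF MAC assignment above bumps the last octet of a seed address once per VF (the seed itself comes from hashing the PF MAC). A sketch with a fixed, made-up seed:

#include <stdio.h>

int main(void)
{
	/* Made-up seed; the driver derives it by hashing the PF MAC. */
	unsigned char mac[6] = { 0x02, 0x00, 0xc9, 0x12, 0x34, 0x00 };

	for (int vf = 0; vf < 3; vf++) {
		printf("vf %d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		mac[5] += 1;	/* next VF gets the next address */
	}
	return 0;
}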
2754
4c876616
SP
2755static int be_vfs_mac_query(struct be_adapter *adapter)
2756{
2757 int status, vf;
2758 u8 mac[ETH_ALEN];
2759 struct be_vf_cfg *vf_cfg;
2760 bool active;
2761
2762 for_all_vfs(adapter, vf_cfg, vf) {
2763 be_cmd_get_mac_from_list(adapter, mac, &active,
2764 &vf_cfg->pmac_id, 0);
2765
2766 status = be_cmd_mac_addr_query(adapter, mac, false,
2767 vf_cfg->if_handle, 0);
2768 if (status)
2769 return status;
2770 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2771 }
2772 return 0;
2773}
2774
f9449ab7 2775static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2776{
11ac75ed 2777 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2778 u32 vf;
2779
39f1d94d 2780 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2781 dev_warn(&adapter->pdev->dev,
2782 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2783 goto done;
2784 }
2785
b4c1df93
SP
2786 pci_disable_sriov(adapter->pdev);
2787
11ac75ed 2788 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2789 if (lancer_chip(adapter))
2790 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2791 else
11ac75ed
SP
2792 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2793 vf_cfg->pmac_id, vf + 1);
f9449ab7 2794
11ac75ed
SP
2795 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2796 }
39f1d94d
SP
2797done:
2798 kfree(adapter->vf_cfg);
2799 adapter->num_vfs = 0;
6d87f5c3
AK
2800}
2801
a54769f5
SP
2802static int be_clear(struct be_adapter *adapter)
2803{
fbc13f01
AK
2804 int i = 1;
2805
191eb756
SP
2806 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2807 cancel_delayed_work_sync(&adapter->work);
2808 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2809 }
2810
11ac75ed 2811 if (sriov_enabled(adapter))
f9449ab7
SP
2812 be_vf_clear(adapter);
2813
fbc13f01
AK
2814 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2815 be_cmd_pmac_del(adapter, adapter->if_handle,
2816 adapter->pmac_id[i], 0);
2817
f9449ab7 2818 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2819
2820 be_mcc_queues_destroy(adapter);
10ef9ab4 2821 be_rx_cqs_destroy(adapter);
a54769f5 2822 be_tx_queues_destroy(adapter);
10ef9ab4 2823 be_evt_queues_destroy(adapter);
a54769f5 2824
abb93951
PR
2825 kfree(adapter->pmac_id);
2826 adapter->pmac_id = NULL;
2827
10ef9ab4 2828 be_msix_disable(adapter);
a54769f5
SP
2829 return 0;
2830}
2831
4c876616 2832static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2833{
4c876616
SP
2834 struct be_vf_cfg *vf_cfg;
2835 u32 cap_flags, en_flags, vf;
abb93951
PR
2836 int status;
2837
4c876616
SP
2838 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2839 BE_IF_FLAGS_MULTICAST;
abb93951 2840
4c876616
SP
2841 for_all_vfs(adapter, vf_cfg, vf) {
2842 if (!BE3_chip(adapter))
a05f99db
VV
2843 be_cmd_get_profile_config(adapter, &cap_flags,
2844 NULL, vf + 1);
4c876616
SP
2845
2846 /* If a FW profile exists, then cap_flags are updated */
2847 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2848 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2849 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2850 &vf_cfg->if_handle, vf + 1);
2851 if (status)
2852 goto err;
2853 }
2854err:
2855 return status;
abb93951
PR
2856}
2857
39f1d94d 2858static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2859{
11ac75ed 2860 struct be_vf_cfg *vf_cfg;
30128031
SP
2861 int vf;
2862
39f1d94d
SP
2863 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2864 GFP_KERNEL);
2865 if (!adapter->vf_cfg)
2866 return -ENOMEM;
2867
11ac75ed
SP
2868 for_all_vfs(adapter, vf_cfg, vf) {
2869 vf_cfg->if_handle = -1;
2870 vf_cfg->pmac_id = -1;
30128031 2871 }
39f1d94d 2872 return 0;
30128031
SP
2873}
2874
f9449ab7
SP
2875static int be_vf_setup(struct be_adapter *adapter)
2876{
11ac75ed 2877 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2878 u16 def_vlan, lnk_speed;
4c876616
SP
2879 int status, old_vfs, vf;
2880 struct device *dev = &adapter->pdev->dev;
39f1d94d 2881
4c876616
SP
2882 old_vfs = be_find_vfs(adapter, ENABLED);
2883 if (old_vfs) {
2884 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2885 if (old_vfs != num_vfs)
2886 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2887 adapter->num_vfs = old_vfs;
39f1d94d 2888 } else {
4c876616
SP
2889 if (num_vfs > adapter->dev_num_vfs)
2890 dev_info(dev, "Device supports %d VFs and not %d\n",
2891 adapter->dev_num_vfs, num_vfs);
2892 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
b4c1df93 2893 if (!adapter->num_vfs)
4c876616 2894 return 0;
39f1d94d
SP
2895 }
2896
2897 status = be_vf_setup_init(adapter);
2898 if (status)
2899 goto err;
30128031 2900
4c876616
SP
2901 if (old_vfs) {
2902 for_all_vfs(adapter, vf_cfg, vf) {
2903 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2904 if (status)
2905 goto err;
2906 }
2907 } else {
2908 status = be_vfs_if_create(adapter);
f9449ab7
SP
2909 if (status)
2910 goto err;
f9449ab7
SP
2911 }
2912
4c876616
SP
2913 if (old_vfs) {
2914 status = be_vfs_mac_query(adapter);
2915 if (status)
2916 goto err;
2917 } else {
39f1d94d
SP
2918 status = be_vf_eth_addr_config(adapter);
2919 if (status)
2920 goto err;
2921 }
f9449ab7 2922
11ac75ed 2923 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2924 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2925 * Allow full available bandwidth
2926 */
2927 if (BE3_chip(adapter) && !old_vfs)
2928 be_cmd_set_qos(adapter, 1000, vf+1);
2929
2930 status = be_cmd_link_status_query(adapter, &lnk_speed,
2931 NULL, vf + 1);
2932 if (!status)
2933 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2934
2935 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2936 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2937 if (status)
2938 goto err;
2939 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2940
2941 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2942 }
b4c1df93
SP
2943
2944 if (!old_vfs) {
2945 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2946 if (status) {
2947 dev_err(dev, "SRIOV enable failed\n");
2948 adapter->num_vfs = 0;
2949 goto err;
2950 }
2951 }
f9449ab7
SP
2952 return 0;
2953err:
4c876616
SP
2954 dev_err(dev, "VF setup failed\n");
2955 be_vf_clear(adapter);
f9449ab7
SP
2956 return status;
2957}
2958
30128031
SP
2959static void be_setup_init(struct be_adapter *adapter)
2960{
2961 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2962 adapter->phy.link_speed = -1;
30128031
SP
2963 adapter->if_handle = -1;
2964 adapter->be3_native = false;
2965 adapter->promiscuous = false;
f25b119c
PR
2966 if (be_physfn(adapter))
2967 adapter->cmd_privileges = MAX_PRIVILEGES;
2968 else
2969 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2970}
2971
1578e777
PR
2972static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2973 bool *active_mac, u32 *pmac_id)
590c391d 2974{
1578e777 2975 int status = 0;
e5e1ee89 2976
1578e777
PR
2977 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2978 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2979 if (!lancer_chip(adapter) && !be_physfn(adapter))
2980 *active_mac = true;
2981 else
2982 *active_mac = false;
e5e1ee89 2983
1578e777
PR
2984 return status;
2985 }
e5e1ee89 2986
1578e777
PR
2987 if (lancer_chip(adapter)) {
2988 status = be_cmd_get_mac_from_list(adapter, mac,
2989 active_mac, pmac_id, 0);
2990 if (*active_mac) {
5ee4979b
SP
2991 status = be_cmd_mac_addr_query(adapter, mac, false,
2992 if_handle, *pmac_id);
1578e777
PR
2993 }
2994 } else if (be_physfn(adapter)) {
2995 /* For BE3, for PF get permanent MAC */
5ee4979b 2996 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 2997 *active_mac = false;
e5e1ee89 2998 } else {
1578e777 2999 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 3000 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
3001 if_handle, 0);
3002 *active_mac = true;
e5e1ee89 3003 }
590c391d
PR
3004 return status;
3005}
3006
abb93951
PR
3007static void be_get_resources(struct be_adapter *adapter)
3008{
4c876616
SP
3009 u16 dev_num_vfs;
3010 int pos, status;
abb93951 3011 bool profile_present = false;
a05f99db 3012 u16 txq_count = 0;
abb93951 3013
4c876616 3014 if (!BEx_chip(adapter)) {
abb93951 3015 status = be_cmd_get_func_config(adapter);
abb93951
PR
3016 if (!status)
3017 profile_present = true;
a05f99db
VV
3018 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3019 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
3020 }
3021
3022 if (profile_present) {
3023 /* Sanity fixes for Lancer */
3024 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3025 BE_UC_PMAC_COUNT);
3026 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3027 BE_NUM_VLANS_SUPPORTED);
3028 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3029 BE_MAX_MC);
3030 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3031 MAX_TX_QS);
3032 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3033 BE3_MAX_RSS_QS);
3034 adapter->max_event_queues = min_t(u16,
3035 adapter->max_event_queues,
3036 BE3_MAX_RSS_QS);
3037
3038 if (adapter->max_rss_queues &&
3039 adapter->max_rss_queues == adapter->max_rx_queues)
3040 adapter->max_rss_queues -= 1;
3041
3042 if (adapter->max_event_queues < adapter->max_rss_queues)
3043 adapter->max_rss_queues = adapter->max_event_queues;
3044
3045 } else {
3046 if (be_physfn(adapter))
3047 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3048 else
3049 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3050
3051 if (adapter->function_mode & FLEX10_MODE)
3052 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3053 else
3054 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3055
3056 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
3057 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3058 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3059 MAX_TX_QS);
abb93951
PR
3060 adapter->max_rss_queues = (adapter->be3_native) ?
3061 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3062 adapter->max_event_queues = BE3_MAX_RSS_QS;
3063
3064 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3065 BE_IF_FLAGS_BROADCAST |
3066 BE_IF_FLAGS_MULTICAST |
3067 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3068 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3069 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3070 BE_IF_FLAGS_PROMISCUOUS;
3071
3072 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3073 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3074 }
4c876616
SP
3075
3076 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3077 if (pos) {
3078 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3079 &dev_num_vfs);
3080 if (BE3_chip(adapter))
3081 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3082 adapter->dev_num_vfs = dev_num_vfs;
3083 }
abb93951
PR
3084}
3085
39f1d94d
SP
3086/* Routine to query per function resource limits */
3087static int be_get_config(struct be_adapter *adapter)
3088{
4c876616 3089 int status;
39f1d94d 3090
abb93951
PR
3091 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3092 &adapter->function_mode,
0ad3157e
VV
3093 &adapter->function_caps,
3094 &adapter->asic_rev);
abb93951
PR
3095 if (status)
3096 goto err;
3097
3098 be_get_resources(adapter);
3099
3100 /* primary mac needs 1 pmac entry */
3101 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3102 sizeof(u32), GFP_KERNEL);
3103 if (!adapter->pmac_id) {
3104 status = -ENOMEM;
3105 goto err;
3106 }
3107
abb93951
PR
3108err:
3109 return status;
39f1d94d
SP
3110}
3111
5fb379ee
SP
3112static int be_setup(struct be_adapter *adapter)
3113{
39f1d94d 3114 struct device *dev = &adapter->pdev->dev;
abb93951 3115 u32 en_flags;
a54769f5 3116 u32 tx_fc, rx_fc;
10ef9ab4 3117 int status;
ba343c77 3118 u8 mac[ETH_ALEN];
1578e777 3119 bool active_mac;
ba343c77 3120
30128031 3121 be_setup_init(adapter);
6b7c5b94 3122
abb93951
PR
3123 if (!lancer_chip(adapter))
3124 be_cmd_req_native_mode(adapter);
39f1d94d 3125
abb93951
PR
3126 status = be_get_config(adapter);
3127 if (status)
3128 goto err;
73d540f2 3129
c2bba3df
SK
3130 status = be_msix_enable(adapter);
3131 if (status)
3132 goto err;
10ef9ab4
SP
3133
3134 status = be_evt_queues_create(adapter);
3135 if (status)
a54769f5 3136 goto err;
6b7c5b94 3137
10ef9ab4
SP
3138 status = be_tx_cqs_create(adapter);
3139 if (status)
3140 goto err;
3141
3142 status = be_rx_cqs_create(adapter);
3143 if (status)
a54769f5 3144 goto err;
6b7c5b94 3145
f9449ab7 3146 status = be_mcc_queues_create(adapter);
10ef9ab4 3147 if (status)
a54769f5 3148 goto err;
6b7c5b94 3149
f25b119c
PR
3150 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3151 /* In UMC mode FW does not return right privileges.
3152 * Override with correct privilege equivalent to PF.
3153 */
3154 if (be_is_mc(adapter))
3155 adapter->cmd_privileges = MAX_PRIVILEGES;
3156
f9449ab7
SP
3157 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3158 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3159
abb93951 3160 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3161 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3162
abb93951 3163 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3164
abb93951 3165 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3166 &adapter->if_handle, 0);
5fb379ee 3167 if (status != 0)
a54769f5 3168 goto err;
6b7c5b94 3169
1578e777
PR
3170 memset(mac, 0, ETH_ALEN);
3171 active_mac = false;
3172 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3173 &active_mac, &adapter->pmac_id[0]);
3174 if (status != 0)
3175 goto err;
3176
3177 if (!active_mac) {
3178 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3179 &adapter->pmac_id[0], 0);
3180 if (status != 0)
3181 goto err;
3182 }
3183
3184 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3185 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3186 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3187 }
0dffc83e 3188
10ef9ab4
SP
3189 status = be_tx_qs_create(adapter);
3190 if (status)
3191 goto err;
3192
04b71175 3193 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
5a56eb10 3194
1d1e9a46 3195 if (adapter->vlans_added)
10329df8 3196 be_vid_config(adapter);
7ab8b0b4 3197
a54769f5 3198 be_set_rx_mode(adapter->netdev);
5fb379ee 3199
ddc3f5cb 3200 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3201
ddc3f5cb
AK
3202 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3203 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3204 adapter->rx_fc);
2dc1deb6 3205
b4c1df93 3206 if (be_physfn(adapter)) {
39f1d94d
SP
3207 if (adapter->dev_num_vfs)
3208 be_vf_setup(adapter);
3209 else
3210 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3211 }
3212
f25b119c
PR
3213 status = be_cmd_get_phy_info(adapter);
3214 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3215 adapter->phy.fc_autoneg = 1;
3216
191eb756
SP
3217 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3218 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3219 return 0;
a54769f5
SP
3220err:
3221 be_clear(adapter);
3222 return status;
3223}
6b7c5b94 3224
66268739
IV
3225#ifdef CONFIG_NET_POLL_CONTROLLER
3226static void be_netpoll(struct net_device *netdev)
3227{
3228 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3229 struct be_eq_obj *eqo;
66268739
IV
3230 int i;
3231
e49cc34f
SP
3232 for_all_evt_queues(adapter, eqo, i) {
3233 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3234 napi_schedule(&eqo->napi);
3235 }
10ef9ab4
SP
3236
3237 return;
66268739
IV
3238}
3239#endif
3240
84517482 3241#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3242char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3243
fa9a6fed 3244static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3245 const u8 *p, u32 img_start, int image_size,
3246 int hdr_size)
fa9a6fed
SB
3247{
3248 u32 crc_offset;
3249 u8 flashed_crc[4];
3250 int status;
3f0d4560
AK
3251
3252 crc_offset = hdr_size + img_start + image_size - 4;
3253
fa9a6fed 3254 p += crc_offset;
3f0d4560
AK
3255
3256 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3257 (image_size - 4));
fa9a6fed
SB
3258 if (status) {
3259 dev_err(&adapter->pdev->dev,
3260 "could not get crc from flash, not flashing redboot\n");
3261 return false;
3262 }
3263
 3264 /* update redboot only if crc does not match */
3265 if (!memcmp(flashed_crc, p, 4))
3266 return false;
3267 else
3268 return true;
fa9a6fed
SB
3269}
3270
306f1348
SP
3271static bool phy_flashing_required(struct be_adapter *adapter)
3272{
42f11cf2
AK
3273 return (adapter->phy.phy_type == TN_8022 &&
3274 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3275}
3276
c165541e
PR
3277static bool is_comp_in_ufi(struct be_adapter *adapter,
3278 struct flash_section_info *fsec, int type)
3279{
3280 int i = 0, img_type = 0;
3281 struct flash_section_info_g2 *fsec_g2 = NULL;
3282
ca34fe38 3283 if (BE2_chip(adapter))
c165541e
PR
3284 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3285
3286 for (i = 0; i < MAX_FLASH_COMP; i++) {
3287 if (fsec_g2)
3288 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3289 else
3290 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3291
3292 if (img_type == type)
3293 return true;
3294 }
3295 return false;
3296
3297}
3298
3299struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3300 int header_size,
3301 const struct firmware *fw)
3302{
3303 struct flash_section_info *fsec = NULL;
3304 const u8 *p = fw->data;
3305
3306 p += header_size;
3307 while (p < (fw->data + fw->size)) {
3308 fsec = (struct flash_section_info *)p;
3309 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3310 return fsec;
3311 p += 32;
3312 }
3313 return NULL;
3314}
3315
773a2d7c
PR
3316static int be_flash(struct be_adapter *adapter, const u8 *img,
3317 struct be_dma_mem *flash_cmd, int optype, int img_size)
3318{
3319 u32 total_bytes = 0, flash_op, num_bytes = 0;
3320 int status = 0;
3321 struct be_cmd_write_flashrom *req = flash_cmd->va;
3322
3323 total_bytes = img_size;
3324 while (total_bytes) {
3325 num_bytes = min_t(u32, 32*1024, total_bytes);
3326
3327 total_bytes -= num_bytes;
3328
3329 if (!total_bytes) {
3330 if (optype == OPTYPE_PHY_FW)
3331 flash_op = FLASHROM_OPER_PHY_FLASH;
3332 else
3333 flash_op = FLASHROM_OPER_FLASH;
3334 } else {
3335 if (optype == OPTYPE_PHY_FW)
3336 flash_op = FLASHROM_OPER_PHY_SAVE;
3337 else
3338 flash_op = FLASHROM_OPER_SAVE;
3339 }
3340
be716446 3341 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3342 img += num_bytes;
3343 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3344 flash_op, num_bytes);
3345 if (status) {
3346 if (status == ILLEGAL_IOCTL_REQ &&
3347 optype == OPTYPE_PHY_FW)
3348 break;
3349 dev_err(&adapter->pdev->dev,
3350 "cmd to write to flash rom failed.\n");
3351 return status;
3352 }
3353 }
3354 return 0;
3355}
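
be_flash() streams an image in 32 KB chunks; every chunk except the last is written with a SAVE opcode and the final one with a FLASH opcode, which commits the image. The chunking logic alone, with a shrunken chunk size so the output stays short:

#include <stdio.h>

#define CHUNK 32u	/* driver uses 32 * 1024; shrunk for the demo */

int main(void)
{
	unsigned int total_bytes = 100, num_bytes;

	while (total_bytes) {
		num_bytes = total_bytes < CHUNK ? total_bytes : CHUNK;
		total_bytes -= num_bytes;
		/* SAVE buffers intermediate chunks; FLASH commits the image */
		printf("%s %u bytes\n",
		       total_bytes ? "SAVE" : "FLASH", num_bytes);
	}
	return 0;
}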

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd,
			int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
						   img_offset, img_size,
						   filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}
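
/*
 * Illustrative sketch (not driver code): firmware reset on Lancer is a
 * two-step pattern -- poll a control register until the "in progress" bit
 * clears (with a bounded timeout), then write the reset mask. The
 * hypothetical pseudo-register below stands in for the real
 * PHYSDEV_CONTROL doorbell access; all names here are invented.
 */
#define EXAMPLE_INP_BIT		0x1u	/* hypothetical "in progress" bit */
#define EXAMPLE_RESET_BIT	0x2u	/* hypothetical reset-request bit */
#define EXAMPLE_TIMEOUT_S	30

static unsigned int example_reg;	/* stands in for an ioread32 target */

static int example_fw_reset(void)
{
	int i;

	for (i = 0; i < EXAMPLE_TIMEOUT_S; i++) {
		if (!(example_reg & EXAMPLE_INP_BIT))
			break;		/* port went idle */
		/* real code sleeps ~1s per iteration here (ssleep(1)) */
	}
	if (i == EXAMPLE_TIMEOUT_S)
		return -1;		/* never went idle; do not reset */

	example_reg |= EXAMPLE_RESET_BIT;	/* request the reset */
	return 0;
}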

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset triggers the commit */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

#define UFI_TYPE2	2
#define UFI_TYPE3	3
#define UFI_TYPE3R	10
#define UFI_TYPE4	4
static int be_get_ufi_type(struct be_adapter *adapter,
			   struct flash_file_hdr_g3 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
		return UFI_TYPE4;
	else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
		if (fhdr->asic_type_rev == 0x10)
			return UFI_TYPE3R;
		else
			return UFI_TYPE3;
	} else if (BE2_chip(adapter) && fhdr->build[0] == '2')
		return UFI_TYPE2;

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}
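
/*
 * Illustrative sketch (not driver code): UFI compatibility is decided by
 * pairing the chip family with the first character of the header's build
 * string ('2'/'3'/'4'), with BE3-R split off via the ASIC revision byte
 * (0x10). The hypothetical helper below re-states that decision table with
 * plain ints so it can be read, and unit-tested, in isolation.
 */
enum example_chip { EX_BE2, EX_BE3, EX_SKYHAWK };

static int example_ufi_type(enum example_chip chip, char build0,
			    unsigned char asic_rev)
{
	if (chip == EX_SKYHAWK && build0 == '4')
		return 4;				/* UFI_TYPE4 */
	if (chip == EX_BE3 && build0 == '3')
		return asic_rev == 0x10 ? 10 : 3;	/* TYPE3R / TYPE3 */
	if (chip == EX_BE2 && build0 == '2')
		return 2;				/* UFI_TYPE2 */
	return -1;					/* incompatible */
}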

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -1;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
}

static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || !be_physfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (adapter->csr == NULL)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma,
					   GFP_KERNEL | __GFP_ZERO);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
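
/*
 * Illustrative sketch (not driver code): the mailbox must sit on a 16-byte
 * boundary, so be_ctrl_init() over-allocates by 16 bytes and rounds both
 * the CPU and DMA addresses up with PTR_ALIGN. The hypothetical stand-alone
 * version below shows the same trick with plain malloc and uintptr_t
 * arithmetic; example_aligned_block is not a real driver symbol.
 */
#include <stdint.h>
#include <stdlib.h>

#define EXAMPLE_ALIGN 16

static void *example_aligned_block(size_t size, void **raw_out)
{
	/* over-allocate so an aligned block of 'size' always fits */
	void *raw = malloc(size + EXAMPLE_ALIGN);
	uintptr_t p;

	if (!raw)
		return NULL;
	*raw_out = raw;		/* keep the original pointer for free() */
	p = ((uintptr_t)raw + EXAMPLE_ALIGN - 1) &
		~(uintptr_t)(EXAMPLE_ALIGN - 1);
	return (void *)p;	/* first 16-byte boundary inside the block */
}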

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else
		/* BE3 and Skyhawk */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return ((adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter)) ? true : false;
}

u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities
		 * check the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}
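
/*
 * Illustrative sketch (assumption flagged): be_get_temp_freq must be a
 * power of two because the driver's MODULO helper (defined in be.h)
 * reduces the counter with a bitmask rather than a division, and BUG_ONs
 * otherwise. A hypothetical rendering of that constraint:
 */
#define EXAMPLE_MODULO(x, div)	((x) & ((div) - 1))	/* valid only when div is a power of 2 */

static int example_is_pow2(unsigned int v)
{
	/* a power of two has exactly one bit set */
	return v && !(v & (v - 1));
}

/* usage: fire an action once every 64 iterations of a 1s worker */
static void example_tick(unsigned int counter)
{
	if (EXAMPLE_MODULO(counter, 64) == 0)
		;	/* e.g. query the die temperature */
}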

static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(&adapter->pdev->dev,
		"Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (status < 0) {
			dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
			goto free_netdev;
		}
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
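
/*
 * Illustrative sketch (not driver code): probe tries a 64-bit DMA mask
 * first and only falls back to 32-bit when the platform refuses, setting
 * NETIF_F_HIGHDMA on success. A hypothetical condensed form of that
 * fallback, using the combined dma_set_mask_and_coherent() helper that
 * later kernels provide for exactly this pattern (needs
 * <linux/dma-mapping.h>):
 */
static int example_set_dma_masks(struct device *dev, bool *highdma)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
		*highdma = true;	/* device can DMA above 4 GB */
		return 0;
	}
	*highdma = false;
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}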

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);