/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

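/* Allocate the ring as one DMA-coherent block of len * entry_size bytes;
 * HW and driver share the memory directly, so no per-entry mapping is
 * needed later.
 */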
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

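/* The notify routines below ring doorbells: the queue id and the count of
 * entries posted (or events/completions popped) are packed into a single
 * 32-bit write. For the RQ and TXQ doorbells, the wmb() orders the ring
 * memory updates before the doorbell write reaches the device.
 */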
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For a BE VF, the MAC address is already activated by the PF.
	 * Hence the only operation left is updating netdev->dev_addr.
	 * Update it if the user is passing the same MAC which was used
	 * while configuring the VF MAC from the PF (hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

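/* Accumulate a 16-bit HW counter into a 32-bit SW accumulator: if the new
 * 16-bit reading is smaller than the low half of the accumulator, the HW
 * counter wrapped, so add one full 16-bit period (65536).
 */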
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

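/* DMA-map the skb head and each frag and fill one WRB per mapping, preceded
 * by the header WRB (and followed by a dummy WRB when an even count is
 * required). Returns the number of data bytes mapped, or 0 on a mapping
 * error, in which case all mappings done so far are unwound.
 */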
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

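/* Software VLAN insertion: write the tag(s) into the packet data itself so
 * that HW tagging, which misbehaves for some packet types on this ASIC
 * (see the workarounds below), can be skipped for this skb.
 */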
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
					VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 && vlan_tx_tag_present(skb) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

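/* Transmit entry point: apply the VLAN/padding workarounds, build the WRBs,
 * stop the subqueue if another max-fragment skb would no longer fit, and
 * only then ring the TX doorbell.
 */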
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

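/* RX filter policy: fall back progressively - promiscuous when requested,
 * all-multi when the multicast list exceeds the HW limit, and promiscuous
 * again when the unicast MAC table overflows.
 */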
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

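/* Adaptive interrupt coalescing: once a second, derive the RX rate in
 * pkts/sec and scale the EQ delay between the configured min and max, so
 * that a busy queue batches more events per interrupt while an idle one
 * stays responsive.
 */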
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

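/* RX buffers are rx_frag_size slices of a larger mapped page; the page is
 * unmapped only when its last slice (last_page_user) is consumed, so a
 * single dma_map_page() serves several receive fragments.
 */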
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

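/* The two parsers below copy the fields of a v0/v1 HW RX completion into
 * the version-independent be_rx_compl_info, so the rest of the RX path
 * never has to care which completion format the chip produced.
 */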
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

10ef9ab4
SP
1586static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1587 struct be_rx_compl_info *rxcp)
2e588f84
SP
1588{
1589 rxcp->pkt_size =
1590 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1591 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1592 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1593 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
9ecb42fd 1594 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
2e588f84
SP
1595 rxcp->ip_csum =
1596 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1597 rxcp->l4_csum =
1598 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1599 rxcp->ipv6 =
1600 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1601 rxcp->rxq_idx =
1602 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1603 rxcp->num_rcvd =
1604 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1605 rxcp->pkt_type =
1606 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
4b972914 1607 rxcp->rss_hash =
c297977e 1608 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
15d72184
SP
1609 if (rxcp->vlanf) {
1610 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
3c709f8f
DM
1611 compl);
1612 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1613 compl);
15d72184 1614 }
12004ae9 1615 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
2e588f84
SP
1616}
1617
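Both parsers above are pure field extraction: AMAP_GET_BITS pulls a named bit-range out of the completion descriptor, which the hardware lays out as 32-bit words. A minimal userspace sketch of that shift-and-mask idea; the offset/length pair in main() is illustrative, not the real v0/v1 layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Extract the bit-range [offset, offset+len) from a descriptor viewed
	 * as an array of CPU-endian 32-bit words (len <= 32). The driver's
	 * AMAP_GET_BITS derives offset/len from the amap_* struct definitions.
	 * The example array below is padded so the 64-bit read stays in bounds. */
	static uint32_t amap_get(const uint32_t *desc, unsigned int offset,
				 unsigned int len)
	{
		uint64_t val = ((uint64_t)desc[offset / 32 + 1] << 32) |
			       desc[offset / 32];

		return (val >> (offset % 32)) & ((1ULL << len) - 1);
	}

	int main(void)
	{
		uint32_t compl[4] = { 0x00001abc, 0, 0, 0 };

		/* illustrative: a 14-bit "pkt_size" field at bit offset 0 */
		printf("pkt_size = %u\n", amap_get(compl, 0, 14));
		return 0;
	}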
1618static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1619{
1620 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1621 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1622 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1623
2e588f84
SP
1624 /* For checking the valid bit it is Ok to use either definition as the
1625 * valid bit is at the same position in both v0 and v1 Rx compl */
1626 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1627 return NULL;
6b7c5b94 1628
2e588f84
SP
1629 rmb();
1630 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1631
2e588f84 1632 if (adapter->be3_native)
10ef9ab4 1633 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1634 else
10ef9ab4 1635 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1636
15d72184
SP
1637 if (rxcp->vlanf) {
1638 /* vlanf could be wrongly set in some cards.
1639 * ignore if vtm is not set */
752961a1 1640 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1641 rxcp->vlanf = 0;
6b7c5b94 1642
15d72184 1643 if (!lancer_chip(adapter))
3c709f8f 1644 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1645
939cf306 1646 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1647 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1648 rxcp->vlanf = 0;
1649 }
2e588f84
SP
1650
 1651 /* As the compl has been parsed, reset it; we won't touch it again */
1652 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1653
3abcdeda 1654 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1655 return rxcp;
1656}
1657
1829b086 1658static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1659{
6b7c5b94 1660 u32 order = get_order(size);
1829b086 1661
6b7c5b94 1662 if (order > 0)
1829b086
ED
1663 gfp |= __GFP_COMP;
1664 return alloc_pages(gfp, order);
6b7c5b94
SP
1665}
1666
1667/*
 1668 * Allocate a page, split it into fragments of size rx_frag_size and post as
1669 * receive buffers to BE
1670 */
1829b086 1671static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1672{
3abcdeda 1673 struct be_adapter *adapter = rxo->adapter;
26d92f92 1674 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1675 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1676 struct page *pagep = NULL;
1677 struct be_eth_rx_d *rxd;
1678 u64 page_dmaaddr = 0, frag_dmaaddr;
1679 u32 posted, page_offset = 0;
1680
3abcdeda 1681 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1682 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1683 if (!pagep) {
1829b086 1684 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1685 if (unlikely(!pagep)) {
ac124ff9 1686 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1687 break;
1688 }
2b7bcebf
IV
1689 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1690 0, adapter->big_page_size,
1691 DMA_FROM_DEVICE);
6b7c5b94
SP
1692 page_info->page_offset = 0;
1693 } else {
1694 get_page(pagep);
1695 page_info->page_offset = page_offset + rx_frag_size;
1696 }
1697 page_offset = page_info->page_offset;
1698 page_info->page = pagep;
fac6da5b 1699 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1700 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1701
1702 rxd = queue_head_node(rxq);
1703 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1704 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1705
1706 /* Any space left in the current big page for another frag? */
1707 if ((page_offset + rx_frag_size + rx_frag_size) >
1708 adapter->big_page_size) {
1709 pagep = NULL;
1710 page_info->last_page_user = true;
1711 }
26d92f92
SP
1712
1713 prev_page_info = page_info;
1714 queue_head_inc(rxq);
10ef9ab4 1715 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1716 }
1717 if (pagep)
26d92f92 1718 prev_page_info->last_page_user = true;
6b7c5b94
SP
1719
1720 if (posted) {
6b7c5b94 1721 atomic_add(posted, &rxq->used);
8788fdc2 1722 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1723 } else if (atomic_read(&rxq->used) == 0) {
1724 /* Let be_worker replenish when memory is available */
3abcdeda 1725 rxo->rx_post_starved = true;
6b7c5b94 1726 }
6b7c5b94
SP
1727}
1728
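For context on the splitting above: with the default rx_frag_size of 2048 on 4KB pages, get_order(2048) is 0, so big_page_size equals PAGE_SIZE and each allocation yields two receive fragments; the "page_offset + 2 * rx_frag_size > big_page_size" test is what marks a page's last user. A small sketch of that arithmetic, assuming x86-style constants:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_size = 4096;		/* PAGE_SIZE, illustrative */
		unsigned int rx_frag_size = 2048;	/* default module param */
		/* get_order(2048) == 0, so big_page_size = (1 << 0) * PAGE_SIZE */
		unsigned int big_page_size = page_size;
		unsigned int offset, frags = 0;

		for (offset = 0; offset + rx_frag_size <= big_page_size;
		     offset += rx_frag_size)
			frags++;

		printf("%u frags of %u bytes per %u-byte page\n",
		       frags, rx_frag_size, big_page_size);
		return 0;
	}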
5fb379ee 1729static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1730{
6b7c5b94
SP
1731 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1732
1733 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1734 return NULL;
1735
f3eb62d2 1736 rmb();
6b7c5b94
SP
1737 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1738
1739 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1740
1741 queue_tail_inc(tx_cq);
1742 return txcp;
1743}
1744
3c8def97
SP
1745static u16 be_tx_compl_process(struct be_adapter *adapter,
1746 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1747{
3c8def97 1748 struct be_queue_info *txq = &txo->q;
a73b796e 1749 struct be_eth_wrb *wrb;
3c8def97 1750 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1751 struct sk_buff *sent_skb;
ec43b1a6
SP
1752 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1753 bool unmap_skb_hdr = true;
6b7c5b94 1754
ec43b1a6 1755 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1756 BUG_ON(!sent_skb);
ec43b1a6
SP
1757 sent_skbs[txq->tail] = NULL;
1758
1759 /* skip header wrb */
a73b796e 1760 queue_tail_inc(txq);
6b7c5b94 1761
ec43b1a6 1762 do {
6b7c5b94 1763 cur_index = txq->tail;
a73b796e 1764 wrb = queue_tail_node(txq);
2b7bcebf
IV
1765 unmap_tx_frag(&adapter->pdev->dev, wrb,
1766 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1767 unmap_skb_hdr = false;
1768
6b7c5b94
SP
1769 num_wrbs++;
1770 queue_tail_inc(txq);
ec43b1a6 1771 } while (cur_index != last_index);
6b7c5b94 1772
6b7c5b94 1773 kfree_skb(sent_skb);
4d586b82 1774 return num_wrbs;
6b7c5b94
SP
1775}
1776
10ef9ab4
SP
1777/* Return the number of events in the event queue */
1778static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1779{
10ef9ab4
SP
1780 struct be_eq_entry *eqe;
1781 int num = 0;
859b1e4e 1782
10ef9ab4
SP
1783 do {
1784 eqe = queue_tail_node(&eqo->q);
1785 if (eqe->evt == 0)
1786 break;
859b1e4e 1787
10ef9ab4
SP
1788 rmb();
1789 eqe->evt = 0;
1790 num++;
1791 queue_tail_inc(&eqo->q);
1792 } while (true);
1793
1794 return num;
859b1e4e
SP
1795}
1796
10ef9ab4
SP
 1797/* Leaves the EQ in a disarmed state */
1798static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1799{
10ef9ab4 1800 int num = events_get(eqo);
859b1e4e 1801
10ef9ab4 1802 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1803}
1804
10ef9ab4 1805static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1806{
1807 struct be_rx_page_info *page_info;
3abcdeda
SP
1808 struct be_queue_info *rxq = &rxo->q;
1809 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1810 struct be_rx_compl_info *rxcp;
d23e946c
SP
1811 struct be_adapter *adapter = rxo->adapter;
1812 int flush_wait = 0;
6b7c5b94
SP
1813 u16 tail;
1814
d23e946c
SP
1815 /* Consume pending rx completions.
1816 * Wait for the flush completion (identified by zero num_rcvd)
1817 * to arrive. Notify CQ even when there are no more CQ entries
1818 * for HW to flush partially coalesced CQ entries.
1819 * In Lancer, there is no need to wait for flush compl.
1820 */
1821 for (;;) {
1822 rxcp = be_rx_compl_get(rxo);
1823 if (rxcp == NULL) {
1824 if (lancer_chip(adapter))
1825 break;
1826
1827 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1828 dev_warn(&adapter->pdev->dev,
1829 "did not receive flush compl\n");
1830 break;
1831 }
1832 be_cq_notify(adapter, rx_cq->id, true, 0);
1833 mdelay(1);
1834 } else {
1835 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1836 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1837 if (rxcp->num_rcvd == 0)
1838 break;
1839 }
6b7c5b94
SP
1840 }
1841
d23e946c
SP
1842 /* After cleanup, leave the CQ in unarmed state */
1843 be_cq_notify(adapter, rx_cq->id, false, 0);
1844
1845 /* Then free posted rx buffers that were not used */
6b7c5b94 1846 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1847 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1848 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1849 put_page(page_info->page);
1850 memset(page_info, 0, sizeof(*page_info));
1851 }
1852 BUG_ON(atomic_read(&rxq->used));
482c9e79 1853 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1854}
1855
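The tail computation above recovers the index of the oldest posted-but-unconsumed buffer from the ring's head, length and used count. A worked example with illustrative values:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned short head = 5, len = 256, used = 3;	/* illustrative */
		unsigned short tail = (head + len - used) % len;

		assert(tail == 2);	/* outstanding buffers sit at 2, 3, 4 */
		printf("tail = %u\n", tail);
		return 0;
	}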
0ae57bb3 1856static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1857{
0ae57bb3
SP
1858 struct be_tx_obj *txo;
1859 struct be_queue_info *txq;
a8e9179a 1860 struct be_eth_tx_compl *txcp;
4d586b82 1861 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1862 struct sk_buff *sent_skb;
1863 bool dummy_wrb;
0ae57bb3 1864 int i, pending_txqs;
a8e9179a
SP
1865
1866 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1867 do {
0ae57bb3
SP
1868 pending_txqs = adapter->num_tx_qs;
1869
1870 for_all_tx_queues(adapter, txo, i) {
1871 txq = &txo->q;
1872 while ((txcp = be_tx_compl_get(&txo->cq))) {
1873 end_idx =
1874 AMAP_GET_BITS(struct amap_eth_tx_compl,
1875 wrb_index, txcp);
1876 num_wrbs += be_tx_compl_process(adapter, txo,
1877 end_idx);
1878 cmpl++;
1879 }
1880 if (cmpl) {
1881 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1882 atomic_sub(num_wrbs, &txq->used);
1883 cmpl = 0;
1884 num_wrbs = 0;
1885 }
1886 if (atomic_read(&txq->used) == 0)
1887 pending_txqs--;
a8e9179a
SP
1888 }
1889
0ae57bb3 1890 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1891 break;
1892
1893 mdelay(1);
1894 } while (true);
1895
0ae57bb3
SP
1896 for_all_tx_queues(adapter, txo, i) {
1897 txq = &txo->q;
1898 if (atomic_read(&txq->used))
1899 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1900 atomic_read(&txq->used));
1901
1902 /* free posted tx for which compls will never arrive */
1903 while (atomic_read(&txq->used)) {
1904 sent_skb = txo->sent_skb_list[txq->tail];
1905 end_idx = txq->tail;
1906 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1907 &dummy_wrb);
1908 index_adv(&end_idx, num_wrbs - 1, txq->len);
1909 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1910 atomic_sub(num_wrbs, &txq->used);
1911 }
b03388d6 1912 }
6b7c5b94
SP
1913}
1914
10ef9ab4
SP
1915static void be_evt_queues_destroy(struct be_adapter *adapter)
1916{
1917 struct be_eq_obj *eqo;
1918 int i;
1919
1920 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1921 if (eqo->q.created) {
1922 be_eq_clean(eqo);
10ef9ab4 1923 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1924 }
10ef9ab4
SP
1925 be_queue_free(adapter, &eqo->q);
1926 }
1927}
1928
1929static int be_evt_queues_create(struct be_adapter *adapter)
1930{
1931 struct be_queue_info *eq;
1932 struct be_eq_obj *eqo;
1933 int i, rc;
1934
1935 adapter->num_evt_qs = num_irqs(adapter);
1936
1937 for_all_evt_queues(adapter, eqo, i) {
1938 eqo->adapter = adapter;
1939 eqo->tx_budget = BE_TX_BUDGET;
1940 eqo->idx = i;
1941 eqo->max_eqd = BE_MAX_EQD;
1942 eqo->enable_aic = true;
1943
1944 eq = &eqo->q;
1945 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1946 sizeof(struct be_eq_entry));
1947 if (rc)
1948 return rc;
1949
1950 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1951 if (rc)
1952 return rc;
1953 }
1cfafab9 1954 return 0;
10ef9ab4
SP
1955}
1956
5fb379ee
SP
1957static void be_mcc_queues_destroy(struct be_adapter *adapter)
1958{
1959 struct be_queue_info *q;
5fb379ee 1960
8788fdc2 1961 q = &adapter->mcc_obj.q;
5fb379ee 1962 if (q->created)
8788fdc2 1963 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1964 be_queue_free(adapter, q);
1965
8788fdc2 1966 q = &adapter->mcc_obj.cq;
5fb379ee 1967 if (q->created)
8788fdc2 1968 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1969 be_queue_free(adapter, q);
1970}
1971
1972/* Must be called only after TX qs are created as MCC shares TX EQ */
1973static int be_mcc_queues_create(struct be_adapter *adapter)
1974{
1975 struct be_queue_info *q, *cq;
5fb379ee 1976
8788fdc2 1977 cq = &adapter->mcc_obj.cq;
5fb379ee 1978 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1979 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1980 goto err;
1981
10ef9ab4
SP
1982 /* Use the default EQ for MCC completions */
1983 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
1984 goto mcc_cq_free;
1985
8788fdc2 1986 q = &adapter->mcc_obj.q;
5fb379ee
SP
1987 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1988 goto mcc_cq_destroy;
1989
8788fdc2 1990 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1991 goto mcc_q_free;
1992
1993 return 0;
1994
1995mcc_q_free:
1996 be_queue_free(adapter, q);
1997mcc_cq_destroy:
8788fdc2 1998 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1999mcc_cq_free:
2000 be_queue_free(adapter, cq);
2001err:
2002 return -1;
2003}
2004
6b7c5b94
SP
2005static void be_tx_queues_destroy(struct be_adapter *adapter)
2006{
2007 struct be_queue_info *q;
3c8def97
SP
2008 struct be_tx_obj *txo;
2009 u8 i;
6b7c5b94 2010
3c8def97
SP
2011 for_all_tx_queues(adapter, txo, i) {
2012 q = &txo->q;
2013 if (q->created)
2014 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2015 be_queue_free(adapter, q);
6b7c5b94 2016
3c8def97
SP
2017 q = &txo->cq;
2018 if (q->created)
2019 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2020 be_queue_free(adapter, q);
2021 }
6b7c5b94
SP
2022}
2023
dafc0fe3
SP
2024static int be_num_txqs_want(struct be_adapter *adapter)
2025{
abb93951
PR
2026 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2027 be_is_mc(adapter) ||
2028 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 2029 BE2_chip(adapter))
dafc0fe3
SP
2030 return 1;
2031 else
abb93951 2032 return adapter->max_tx_queues;
dafc0fe3
SP
2033}
2034
10ef9ab4 2035static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2036{
10ef9ab4
SP
2037 struct be_queue_info *cq, *eq;
2038 int status;
3c8def97
SP
2039 struct be_tx_obj *txo;
2040 u8 i;
6b7c5b94 2041
dafc0fe3 2042 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
2043 if (adapter->num_tx_qs != MAX_TX_QS) {
2044 rtnl_lock();
dafc0fe3
SP
2045 netif_set_real_num_tx_queues(adapter->netdev,
2046 adapter->num_tx_qs);
3bb62f4f
PR
2047 rtnl_unlock();
2048 }
dafc0fe3 2049
10ef9ab4
SP
2050 for_all_tx_queues(adapter, txo, i) {
2051 cq = &txo->cq;
2052 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2053 sizeof(struct be_eth_tx_compl));
2054 if (status)
2055 return status;
3c8def97 2056
10ef9ab4
SP
2057 /* If num_evt_qs is less than num_tx_qs, then more than
 2058 * one txq shares an eq
2059 */
2060 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2061 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2062 if (status)
2063 return status;
2064 }
2065 return 0;
2066}
6b7c5b94 2067
10ef9ab4
SP
2068static int be_tx_qs_create(struct be_adapter *adapter)
2069{
2070 struct be_tx_obj *txo;
2071 int i, status;
fe6d2a38 2072
3c8def97 2073 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
2074 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2075 sizeof(struct be_eth_wrb));
2076 if (status)
2077 return status;
6b7c5b94 2078
94d73aaa 2079 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2080 if (status)
2081 return status;
3c8def97 2082 }
6b7c5b94 2083
d379142b
SP
2084 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2085 adapter->num_tx_qs);
10ef9ab4 2086 return 0;
6b7c5b94
SP
2087}
2088
10ef9ab4 2089static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2090{
2091 struct be_queue_info *q;
3abcdeda
SP
2092 struct be_rx_obj *rxo;
2093 int i;
2094
2095 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2096 q = &rxo->cq;
2097 if (q->created)
2098 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2099 be_queue_free(adapter, q);
ac6a0c4a
SP
2100 }
2101}
2102
10ef9ab4 2103static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2104{
10ef9ab4 2105 struct be_queue_info *eq, *cq;
3abcdeda
SP
2106 struct be_rx_obj *rxo;
2107 int rc, i;
6b7c5b94 2108
10ef9ab4
SP
2109 /* We'll create as many RSS rings as there are irqs.
 2110 * But when there's only one irq there's no use creating RSS rings.
2111 */
2112 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2113 num_irqs(adapter) + 1 : 1;
7f640062
SP
2114 if (adapter->num_rx_qs != MAX_RX_QS) {
2115 rtnl_lock();
2116 netif_set_real_num_rx_queues(adapter->netdev,
2117 adapter->num_rx_qs);
2118 rtnl_unlock();
2119 }
ac6a0c4a 2120
6b7c5b94 2121 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2122 for_all_rx_queues(adapter, rxo, i) {
2123 rxo->adapter = adapter;
3abcdeda
SP
2124 cq = &rxo->cq;
2125 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2126 sizeof(struct be_eth_rx_compl));
2127 if (rc)
10ef9ab4 2128 return rc;
3abcdeda 2129
10ef9ab4
SP
2130 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2131 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2132 if (rc)
10ef9ab4 2133 return rc;
3abcdeda 2134 }
6b7c5b94 2135
d379142b
SP
2136 dev_info(&adapter->pdev->dev,
2137 "created %d RSS queue(s) and 1 default RX queue\n",
2138 adapter->num_rx_qs - 1);
10ef9ab4 2139 return 0;
b628bde2
SP
2140}
2141
6b7c5b94
SP
2142static irqreturn_t be_intx(int irq, void *dev)
2143{
e49cc34f
SP
2144 struct be_eq_obj *eqo = dev;
2145 struct be_adapter *adapter = eqo->adapter;
2146 int num_evts = 0;
6b7c5b94 2147
d0b9cec3
SP
2148 /* IRQ is not expected when NAPI is scheduled as the EQ
2149 * will not be armed.
2150 * But, this can happen on Lancer INTx where it takes
2151 * a while to de-assert INTx or in BE2 where occasionaly
2152 * an interrupt may be raised even when EQ is unarmed.
2153 * If NAPI is already scheduled, then counting & notifying
2154 * events will orphan them.
e49cc34f 2155 */
d0b9cec3 2156 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2157 num_evts = events_get(eqo);
d0b9cec3
SP
2158 __napi_schedule(&eqo->napi);
2159 if (num_evts)
2160 eqo->spurious_intr = 0;
2161 }
2162 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2163
d0b9cec3
SP
 2164 /* Return IRQ_HANDLED only for the first spurious intr
2165 * after a valid intr to stop the kernel from branding
2166 * this irq as a bad one!
e49cc34f 2167 */
d0b9cec3
SP
2168 if (num_evts || eqo->spurious_intr++ == 0)
2169 return IRQ_HANDLED;
2170 else
2171 return IRQ_NONE;
6b7c5b94
SP
2172}
2173
10ef9ab4 2174static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2175{
10ef9ab4 2176 struct be_eq_obj *eqo = dev;
6b7c5b94 2177
0b545a62
SP
2178 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2179 napi_schedule(&eqo->napi);
6b7c5b94
SP
2180 return IRQ_HANDLED;
2181}
2182
2e588f84 2183static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2184{
2e588f84 2185 return rxcp->tcpf && !rxcp->err;
6b7c5b94
SP
2186}
2187
10ef9ab4
SP
2188static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2189 int budget)
6b7c5b94 2190{
3abcdeda
SP
2191 struct be_adapter *adapter = rxo->adapter;
2192 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2193 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2194 u32 work_done;
2195
2196 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2197 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2198 if (!rxcp)
2199 break;
2200
12004ae9
SP
 2201 /* Is it a flush compl that has no data? */
2202 if (unlikely(rxcp->num_rcvd == 0))
2203 goto loop_continue;
2204
 2205 /* Discard compl with partial DMA (Lancer B0) */
2206 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2207 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2208 goto loop_continue;
2209 }
2210
2211 /* On BE drop pkts that arrive due to imperfect filtering in
 2212 * promiscuous mode on some SKUs
2213 */
2214 if (unlikely(rxcp->port != adapter->port_num &&
2215 !lancer_chip(adapter))) {
10ef9ab4 2216 be_rx_compl_discard(rxo, rxcp);
12004ae9 2217 goto loop_continue;
64642811 2218 }
009dd872 2219
12004ae9 2220 if (do_gro(rxcp))
10ef9ab4 2221 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2222 else
10ef9ab4 2223 be_rx_compl_process(rxo, rxcp);
12004ae9 2224loop_continue:
2e588f84 2225 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2226 }
2227
10ef9ab4
SP
2228 if (work_done) {
2229 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2230
10ef9ab4
SP
2231 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2232 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2233 }
10ef9ab4 2234
6b7c5b94
SP
2235 return work_done;
2236}
2237
10ef9ab4
SP
2238static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2239 int budget, int idx)
6b7c5b94 2240{
6b7c5b94 2241 struct be_eth_tx_compl *txcp;
10ef9ab4 2242 int num_wrbs = 0, work_done;
3c8def97 2243
10ef9ab4
SP
2244 for (work_done = 0; work_done < budget; work_done++) {
2245 txcp = be_tx_compl_get(&txo->cq);
2246 if (!txcp)
2247 break;
2248 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2249 AMAP_GET_BITS(struct amap_eth_tx_compl,
2250 wrb_index, txcp));
10ef9ab4 2251 }
6b7c5b94 2252
10ef9ab4
SP
2253 if (work_done) {
2254 be_cq_notify(adapter, txo->cq.id, true, work_done);
2255 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2256
10ef9ab4
SP
2257 /* As Tx wrbs have been freed up, wake up netdev queue
2258 * if it was stopped due to lack of tx wrbs. */
2259 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2260 atomic_read(&txo->q.used) < txo->q.len / 2) {
2261 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2262 }
10ef9ab4
SP
2263
2264 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2265 tx_stats(txo)->tx_compl += work_done;
2266 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2267 }
10ef9ab4
SP
2268 return (work_done < budget); /* Done */
2269}
6b7c5b94 2270
10ef9ab4
SP
2271int be_poll(struct napi_struct *napi, int budget)
2272{
2273 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2274 struct be_adapter *adapter = eqo->adapter;
0b545a62 2275 int max_work = 0, work, i, num_evts;
10ef9ab4 2276 bool tx_done;
f31e50a8 2277
0b545a62
SP
2278 num_evts = events_get(eqo);
2279
10ef9ab4
SP
2280 /* Process all TXQs serviced by this EQ */
2281 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2282 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2283 eqo->tx_budget, i);
2284 if (!tx_done)
2285 max_work = budget;
f31e50a8
SP
2286 }
2287
10ef9ab4
SP
2288 /* This loop will iterate twice for EQ0 in which
2289 * completions of the last RXQ (default one) are also processed
2290 * For other EQs the loop iterates only once
2291 */
2292 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2293 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2294 max_work = max(work, max_work);
2295 }
6b7c5b94 2296
10ef9ab4
SP
2297 if (is_mcc_eqo(eqo))
2298 be_process_mcc(adapter);
93c86700 2299
10ef9ab4
SP
2300 if (max_work < budget) {
2301 napi_complete(napi);
0b545a62 2302 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2303 } else {
2304 /* As we'll continue in polling mode, count and clear events */
0b545a62 2305 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2306 }
10ef9ab4 2307 return max_work;
6b7c5b94
SP
2308}
2309
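A worked example of the strided loops above, with illustrative sizes: given num_evt_qs = 4 and num_rx_qs = 5 (four RSS rings plus the default ring), EQ0 visits rx_obj[0] and rx_obj[4], the double iteration the comment describes, while EQ1..EQ3 each visit exactly one ring:

	#include <stdio.h>

	int main(void)
	{
		int num_evt_qs = 4, num_rx_qs = 5;	/* illustrative */
		int idx, i;

		for (idx = 0; idx < num_evt_qs; idx++)
			for (i = idx; i < num_rx_qs; i += num_evt_qs)
				printf("eq%d services rxq%d\n", idx, i);
		return 0;
	}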
f67ef7ba 2310void be_detect_error(struct be_adapter *adapter)
7c185276 2311{
e1cfb67a
PR
2312 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2313 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2314 u32 i;
2315
d23e946c 2316 if (be_hw_error(adapter))
72f02485
SP
2317 return;
2318
e1cfb67a
PR
2319 if (lancer_chip(adapter)) {
2320 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2321 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2322 sliport_err1 = ioread32(adapter->db +
2323 SLIPORT_ERROR1_OFFSET);
2324 sliport_err2 = ioread32(adapter->db +
2325 SLIPORT_ERROR2_OFFSET);
2326 }
2327 } else {
2328 pci_read_config_dword(adapter->pdev,
2329 PCICFG_UE_STATUS_LOW, &ue_lo);
2330 pci_read_config_dword(adapter->pdev,
2331 PCICFG_UE_STATUS_HIGH, &ue_hi);
2332 pci_read_config_dword(adapter->pdev,
2333 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2334 pci_read_config_dword(adapter->pdev,
2335 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2336
f67ef7ba
PR
2337 ue_lo = (ue_lo & ~ue_lo_mask);
2338 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2339 }
7c185276 2340
1451ae6e
AK
2341 /* On certain platforms BE hardware can indicate spurious UEs.
2342 * Allow the h/w to stop working completely in case of a real UE.
2343 * Hence not setting the hw_error for UE detection.
2344 */
2345 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2346 adapter->hw_error = true;
434b3648 2347 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2348 "Error detected in the card\n");
2349 }
2350
2351 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2352 dev_err(&adapter->pdev->dev,
2353 "ERR: sliport status 0x%x\n", sliport_status);
2354 dev_err(&adapter->pdev->dev,
2355 "ERR: sliport error1 0x%x\n", sliport_err1);
2356 dev_err(&adapter->pdev->dev,
2357 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2358 }
2359
e1cfb67a
PR
2360 if (ue_lo) {
2361 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2362 if (ue_lo & 1)
7c185276
AK
2363 dev_err(&adapter->pdev->dev,
2364 "UE: %s bit set\n", ue_status_low_desc[i]);
2365 }
2366 }
f67ef7ba 2367
e1cfb67a
PR
2368 if (ue_hi) {
2369 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2370 if (ue_hi & 1)
7c185276
AK
2371 dev_err(&adapter->pdev->dev,
2372 "UE: %s bit set\n", ue_status_hi_desc[i]);
2373 }
2374 }
2375
2376}
2377
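The decode loops above walk the unmasked UE status words one bit at a time, reporting the table entry for every set bit. A stand-alone sketch of that walk; the names below are placeholders, not the driver's UE description tables:

	#include <stdio.h>

	static const char * const desc[] = { "BIT0", "BIT1", "BIT2", "BIT3" };

	int main(void)
	{
		unsigned int ue = 0x5, i;	/* illustrative: bits 0 and 2 set */

		for (i = 0; ue; ue >>= 1, i++)
			if (ue & 1)
				printf("UE: %s bit set\n", desc[i]);
		return 0;
	}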
8d56ff11
SP
2378static void be_msix_disable(struct be_adapter *adapter)
2379{
ac6a0c4a 2380 if (msix_enabled(adapter)) {
8d56ff11 2381 pci_disable_msix(adapter->pdev);
ac6a0c4a 2382 adapter->num_msix_vec = 0;
3abcdeda
SP
2383 }
2384}
2385
10ef9ab4
SP
2386static uint be_num_rss_want(struct be_adapter *adapter)
2387{
30e80b55 2388 u32 num = 0;
abb93951 2389
10ef9ab4 2390 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2391 (lancer_chip(adapter) ||
2392 (!sriov_want(adapter) && be_physfn(adapter)))) {
2393 num = adapter->max_rss_queues;
30e80b55
YM
2394 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2395 }
2396 return num;
10ef9ab4
SP
2397}
2398
c2bba3df 2399static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2400{
10ef9ab4 2401#define BE_MIN_MSIX_VECTORS 1
045508a8 2402 int i, status, num_vec, num_roce_vec = 0;
d379142b 2403 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2404
10ef9ab4
SP
2405 /* If RSS queues are not used, need a vec for default RX Q */
2406 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2407 if (be_roce_supported(adapter)) {
2408 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2409 (num_online_cpus() + 1));
2410 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2411 num_vec += num_roce_vec;
2412 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2413 }
10ef9ab4 2414 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2415
ac6a0c4a 2416 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2417 adapter->msix_entries[i].entry = i;
2418
ac6a0c4a 2419 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2420 if (status == 0) {
2421 goto done;
2422 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2423 num_vec = status;
c2bba3df
SK
2424 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2425 num_vec);
2426 if (!status)
3abcdeda 2427 goto done;
3abcdeda 2428 }
d379142b
SP
2429
2430 dev_warn(dev, "MSIx enable failed\n");
c2bba3df
SK
2431 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2432 if (!be_physfn(adapter))
2433 return status;
2434 return 0;
3abcdeda 2435done:
045508a8
PP
2436 if (be_roce_supported(adapter)) {
2437 if (num_vec > num_roce_vec) {
2438 adapter->num_msix_vec = num_vec - num_roce_vec;
2439 adapter->num_msix_roce_vec =
2440 num_vec - adapter->num_msix_vec;
2441 } else {
2442 adapter->num_msix_vec = num_vec;
2443 adapter->num_msix_roce_vec = 0;
2444 }
2445 } else
2446 adapter->num_msix_vec = num_vec;
d379142b 2447 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
c2bba3df 2448 return 0;
6b7c5b94
SP
2449}
2450
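The retry above relies on the legacy pci_enable_msix() contract: it returns 0 on success and, when fewer vectors are available, a positive count the caller may retry with (newer kernels fold this into pci_enable_msix_range()). A userspace simulation of that contract; fake_enable_msix() is a stand-in, not a real API:

	#include <stdio.h>

	#define AVAILABLE_VECS 4	/* pretend the platform grants only 4 */

	static int fake_enable_msix(int requested)
	{
		return requested <= AVAILABLE_VECS ? 0 : AVAILABLE_VECS;
	}

	int main(void)
	{
		int num_vec = 8, status = fake_enable_msix(num_vec);

		if (status > 0) {	/* shortage: retry with what's left */
			num_vec = status;
			status = fake_enable_msix(num_vec);
		}
		printf("status=%d num_vec=%d\n", status, num_vec);
		return 0;
	}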
fe6d2a38 2451static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2452 struct be_eq_obj *eqo)
b628bde2 2453{
10ef9ab4 2454 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2455}
6b7c5b94 2456
b628bde2
SP
2457static int be_msix_register(struct be_adapter *adapter)
2458{
10ef9ab4
SP
2459 struct net_device *netdev = adapter->netdev;
2460 struct be_eq_obj *eqo;
2461 int status, i, vec;
6b7c5b94 2462
10ef9ab4
SP
2463 for_all_evt_queues(adapter, eqo, i) {
2464 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2465 vec = be_msix_vec_get(adapter, eqo);
2466 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2467 if (status)
2468 goto err_msix;
2469 }
b628bde2 2470
6b7c5b94 2471 return 0;
3abcdeda 2472err_msix:
10ef9ab4
SP
2473 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2474 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2475 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2476 status);
ac6a0c4a 2477 be_msix_disable(adapter);
6b7c5b94
SP
2478 return status;
2479}
2480
2481static int be_irq_register(struct be_adapter *adapter)
2482{
2483 struct net_device *netdev = adapter->netdev;
2484 int status;
2485
ac6a0c4a 2486 if (msix_enabled(adapter)) {
6b7c5b94
SP
2487 status = be_msix_register(adapter);
2488 if (status == 0)
2489 goto done;
ba343c77
SB
2490 /* INTx is not supported for VF */
2491 if (!be_physfn(adapter))
2492 return status;
6b7c5b94
SP
2493 }
2494
e49cc34f 2495 /* INTx: only the first EQ is used */
6b7c5b94
SP
2496 netdev->irq = adapter->pdev->irq;
2497 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2498 &adapter->eq_obj[0]);
6b7c5b94
SP
2499 if (status) {
2500 dev_err(&adapter->pdev->dev,
2501 "INTx request IRQ failed - err %d\n", status);
2502 return status;
2503 }
2504done:
2505 adapter->isr_registered = true;
2506 return 0;
2507}
2508
2509static void be_irq_unregister(struct be_adapter *adapter)
2510{
2511 struct net_device *netdev = adapter->netdev;
10ef9ab4 2512 struct be_eq_obj *eqo;
3abcdeda 2513 int i;
6b7c5b94
SP
2514
2515 if (!adapter->isr_registered)
2516 return;
2517
2518 /* INTx */
ac6a0c4a 2519 if (!msix_enabled(adapter)) {
e49cc34f 2520 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2521 goto done;
2522 }
2523
2524 /* MSIx */
10ef9ab4
SP
2525 for_all_evt_queues(adapter, eqo, i)
2526 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2527
6b7c5b94
SP
2528done:
2529 adapter->isr_registered = false;
6b7c5b94
SP
2530}
2531
10ef9ab4 2532static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2533{
2534 struct be_queue_info *q;
2535 struct be_rx_obj *rxo;
2536 int i;
2537
2538 for_all_rx_queues(adapter, rxo, i) {
2539 q = &rxo->q;
2540 if (q->created) {
2541 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2542 be_rx_cq_clean(rxo);
482c9e79 2543 }
10ef9ab4 2544 be_queue_free(adapter, q);
482c9e79
SP
2545 }
2546}
2547
889cd4b2
SP
2548static int be_close(struct net_device *netdev)
2549{
2550 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2551 struct be_eq_obj *eqo;
2552 int i;
889cd4b2 2553
045508a8
PP
2554 be_roce_dev_close(adapter);
2555
04d3d624
SK
2556 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2557 for_all_evt_queues(adapter, eqo, i)
2558 napi_disable(&eqo->napi);
2559 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2560 }
a323d9bf
SP
2561
2562 be_async_mcc_disable(adapter);
2563
2564 /* Wait for all pending tx completions to arrive so that
2565 * all tx skbs are freed.
2566 */
2567 be_tx_compl_clean(adapter);
fba87559 2568 netif_tx_disable(netdev);
a323d9bf
SP
2569
2570 be_rx_qs_destroy(adapter);
2571
2572 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2573 if (msix_enabled(adapter))
2574 synchronize_irq(be_msix_vec_get(adapter, eqo));
2575 else
2576 synchronize_irq(netdev->irq);
2577 be_eq_clean(eqo);
63fcb27f
PR
2578 }
2579
889cd4b2
SP
2580 be_irq_unregister(adapter);
2581
482c9e79
SP
2582 return 0;
2583}
2584
10ef9ab4 2585static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2586{
2587 struct be_rx_obj *rxo;
e9008ee9
PR
2588 int rc, i, j;
2589 u8 rsstable[128];
482c9e79
SP
2590
2591 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2592 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2593 sizeof(struct be_eth_rx_d));
2594 if (rc)
2595 return rc;
2596 }
2597
2598 /* The FW would like the default RXQ to be created first */
2599 rxo = default_rxo(adapter);
2600 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2601 adapter->if_handle, false, &rxo->rss_id);
2602 if (rc)
2603 return rc;
2604
2605 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2606 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2607 rx_frag_size, adapter->if_handle,
2608 true, &rxo->rss_id);
482c9e79
SP
2609 if (rc)
2610 return rc;
2611 }
2612
2613 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2614 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2615 for_all_rss_queues(adapter, rxo, i) {
2616 if ((j + i) >= 128)
2617 break;
2618 rsstable[j + i] = rxo->rss_id;
2619 }
2620 }
594ad54a
SR
2621 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2622 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2623
2624 if (!BEx_chip(adapter))
2625 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2626 RSS_ENABLE_UDP_IPV6;
2627
2628 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2629 128);
2630 if (rc) {
2631 adapter->rss_flags = 0;
482c9e79 2632 return rc;
594ad54a 2633 }
482c9e79
SP
2634 }
2635
2636 /* First time posting */
10ef9ab4 2637 for_all_rx_queues(adapter, rxo, i)
482c9e79 2638 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2639 return 0;
2640}
2641
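The nested loops above replicate each RSS ring's rss_id round-robin across the 128-entry indirection table. A sketch with four RSS rings whose ids are simply 0..3 here; real ids come back from be_cmd_rxq_create():

	#include <stdio.h>

	int main(void)
	{
		unsigned char rsstable[128];
		int num_rss = 4;	/* num_rx_qs - 1 in the driver */
		int i, j;

		for (j = 0; j < 128; j += num_rss)
			for (i = 0; i < num_rss && j + i < 128; i++)
				rsstable[j + i] = i;	/* rxo->rss_id stand-in */

		printf("entry 0 -> ring %d, entry 5 -> ring %d\n",
		       rsstable[0], rsstable[5]);
		return 0;
	}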
6b7c5b94
SP
2642static int be_open(struct net_device *netdev)
2643{
2644 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2645 struct be_eq_obj *eqo;
3abcdeda 2646 struct be_rx_obj *rxo;
10ef9ab4 2647 struct be_tx_obj *txo;
b236916a 2648 u8 link_status;
3abcdeda 2649 int status, i;
5fb379ee 2650
10ef9ab4 2651 status = be_rx_qs_create(adapter);
482c9e79
SP
2652 if (status)
2653 goto err;
2654
c2bba3df
SK
2655 status = be_irq_register(adapter);
2656 if (status)
2657 goto err;
5fb379ee 2658
10ef9ab4 2659 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2660 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2661
10ef9ab4
SP
2662 for_all_tx_queues(adapter, txo, i)
2663 be_cq_notify(adapter, txo->cq.id, true, 0);
2664
7a1e9b20
SP
2665 be_async_mcc_enable(adapter);
2666
10ef9ab4
SP
2667 for_all_evt_queues(adapter, eqo, i) {
2668 napi_enable(&eqo->napi);
2669 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2670 }
04d3d624 2671 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2672
323ff71e 2673 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2674 if (!status)
2675 be_link_status_update(adapter, link_status);
2676
fba87559 2677 netif_tx_start_all_queues(netdev);
045508a8 2678 be_roce_dev_open(adapter);
889cd4b2
SP
2679 return 0;
2680err:
2681 be_close(adapter->netdev);
2682 return -EIO;
5fb379ee
SP
2683}
2684
71d8d1b5
AK
2685static int be_setup_wol(struct be_adapter *adapter, bool enable)
2686{
2687 struct be_dma_mem cmd;
2688 int status = 0;
2689 u8 mac[ETH_ALEN];
2690
2691 memset(mac, 0, ETH_ALEN);
2692
2693 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2694 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2695 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2696 if (cmd.va == NULL)
2697 return -1;
71d8d1b5
AK
2698
2699 if (enable) {
2700 status = pci_write_config_dword(adapter->pdev,
2701 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2702 if (status) {
2703 dev_err(&adapter->pdev->dev,
2381a55c 2704 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2705 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2706 cmd.dma);
71d8d1b5
AK
2707 return status;
2708 }
2709 status = be_cmd_enable_magic_wol(adapter,
2710 adapter->netdev->dev_addr, &cmd);
2711 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2712 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2713 } else {
2714 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2715 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2716 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2717 }
2718
2b7bcebf 2719 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2720 return status;
2721}
2722
6d87f5c3
AK
2723/*
2724 * Generate a seed MAC address from the PF MAC Address using jhash.
 2725 * MAC addresses for VFs are assigned incrementally starting from the seed.
2726 * These addresses are programmed in the ASIC by the PF and the VF driver
2727 * queries for the MAC address during its probe.
2728 */
4c876616 2729static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2730{
f9449ab7 2731 u32 vf;
3abcdeda 2732 int status = 0;
6d87f5c3 2733 u8 mac[ETH_ALEN];
11ac75ed 2734 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2735
2736 be_vf_eth_addr_generate(adapter, mac);
2737
11ac75ed 2738 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2739 if (lancer_chip(adapter)) {
2740 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2741 } else {
2742 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2743 vf_cfg->if_handle,
2744 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2745 }
2746
6d87f5c3
AK
2747 if (status)
2748 dev_err(&adapter->pdev->dev,
590c391d 2749 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2750 else
11ac75ed 2751 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2752
2753 mac[5] += 1;
2754 }
2755 return status;
2756}
2757
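A sketch of the assignment scheme above: the driver derives a seed MAC from the PF MAC via jhash (a fixed seed value is used below for illustration only) and bumps the last octet once per VF:

	#include <stdio.h>

	int main(void)
	{
		/* illustrative seed; the driver computes this with jhash */
		unsigned char mac[6] = { 0x00, 0x00, 0xc9, 0x12, 0x34, 0x00 };
		int vf;

		for (vf = 0; vf < 3; vf++) {
			printf("VF%d: %02x:%02x:%02x:%02x:%02x:%02x\n", vf,
			       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
			mac[5] += 1;	/* next VF gets the next address */
		}
		return 0;
	}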
4c876616
SP
2758static int be_vfs_mac_query(struct be_adapter *adapter)
2759{
2760 int status, vf;
2761 u8 mac[ETH_ALEN];
2762 struct be_vf_cfg *vf_cfg;
2763 bool active;
2764
2765 for_all_vfs(adapter, vf_cfg, vf) {
2766 be_cmd_get_mac_from_list(adapter, mac, &active,
2767 &vf_cfg->pmac_id, 0);
2768
2769 status = be_cmd_mac_addr_query(adapter, mac, false,
2770 vf_cfg->if_handle, 0);
2771 if (status)
2772 return status;
2773 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2774 }
2775 return 0;
2776}
2777
f9449ab7 2778static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2779{
11ac75ed 2780 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2781 u32 vf;
2782
39f1d94d 2783 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2784 dev_warn(&adapter->pdev->dev,
2785 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2786 goto done;
2787 }
2788
b4c1df93
SP
2789 pci_disable_sriov(adapter->pdev);
2790
11ac75ed 2791 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2792 if (lancer_chip(adapter))
2793 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2794 else
11ac75ed
SP
2795 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2796 vf_cfg->pmac_id, vf + 1);
f9449ab7 2797
11ac75ed
SP
2798 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2799 }
39f1d94d
SP
2800done:
2801 kfree(adapter->vf_cfg);
2802 adapter->num_vfs = 0;
6d87f5c3
AK
2803}
2804
a54769f5
SP
2805static int be_clear(struct be_adapter *adapter)
2806{
fbc13f01
AK
2807 int i = 1;
2808
191eb756
SP
2809 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2810 cancel_delayed_work_sync(&adapter->work);
2811 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2812 }
2813
11ac75ed 2814 if (sriov_enabled(adapter))
f9449ab7
SP
2815 be_vf_clear(adapter);
2816
fbc13f01
AK
2817 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2818 be_cmd_pmac_del(adapter, adapter->if_handle,
2819 adapter->pmac_id[i], 0);
2820
f9449ab7 2821 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2822
2823 be_mcc_queues_destroy(adapter);
10ef9ab4 2824 be_rx_cqs_destroy(adapter);
a54769f5 2825 be_tx_queues_destroy(adapter);
10ef9ab4 2826 be_evt_queues_destroy(adapter);
a54769f5 2827
abb93951
PR
2828 kfree(adapter->pmac_id);
2829 adapter->pmac_id = NULL;
2830
10ef9ab4 2831 be_msix_disable(adapter);
a54769f5
SP
2832 return 0;
2833}
2834
4c876616 2835static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2836{
4c876616
SP
2837 struct be_vf_cfg *vf_cfg;
2838 u32 cap_flags, en_flags, vf;
abb93951
PR
2839 int status;
2840
4c876616
SP
2841 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2842 BE_IF_FLAGS_MULTICAST;
abb93951 2843
4c876616
SP
2844 for_all_vfs(adapter, vf_cfg, vf) {
2845 if (!BE3_chip(adapter))
a05f99db
VV
2846 be_cmd_get_profile_config(adapter, &cap_flags,
2847 NULL, vf + 1);
4c876616
SP
2848
2849 /* If a FW profile exists, then cap_flags are updated */
2850 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2851 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2852 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2853 &vf_cfg->if_handle, vf + 1);
2854 if (status)
2855 goto err;
2856 }
2857err:
2858 return status;
abb93951
PR
2859}
2860
39f1d94d 2861static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2862{
11ac75ed 2863 struct be_vf_cfg *vf_cfg;
30128031
SP
2864 int vf;
2865
39f1d94d
SP
2866 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2867 GFP_KERNEL);
2868 if (!adapter->vf_cfg)
2869 return -ENOMEM;
2870
11ac75ed
SP
2871 for_all_vfs(adapter, vf_cfg, vf) {
2872 vf_cfg->if_handle = -1;
2873 vf_cfg->pmac_id = -1;
30128031 2874 }
39f1d94d 2875 return 0;
30128031
SP
2876}
2877
f9449ab7
SP
2878static int be_vf_setup(struct be_adapter *adapter)
2879{
11ac75ed 2880 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2881 u16 def_vlan, lnk_speed;
4c876616
SP
2882 int status, old_vfs, vf;
2883 struct device *dev = &adapter->pdev->dev;
39f1d94d 2884
4c876616
SP
2885 old_vfs = be_find_vfs(adapter, ENABLED);
2886 if (old_vfs) {
2887 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2888 if (old_vfs != num_vfs)
2889 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2890 adapter->num_vfs = old_vfs;
39f1d94d 2891 } else {
4c876616
SP
2892 if (num_vfs > adapter->dev_num_vfs)
2893 dev_info(dev, "Device supports %d VFs and not %d\n",
2894 adapter->dev_num_vfs, num_vfs);
2895 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
b4c1df93 2896 if (!adapter->num_vfs)
4c876616 2897 return 0;
39f1d94d
SP
2898 }
2899
2900 status = be_vf_setup_init(adapter);
2901 if (status)
2902 goto err;
30128031 2903
4c876616
SP
2904 if (old_vfs) {
2905 for_all_vfs(adapter, vf_cfg, vf) {
2906 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2907 if (status)
2908 goto err;
2909 }
2910 } else {
2911 status = be_vfs_if_create(adapter);
f9449ab7
SP
2912 if (status)
2913 goto err;
f9449ab7
SP
2914 }
2915
4c876616
SP
2916 if (old_vfs) {
2917 status = be_vfs_mac_query(adapter);
2918 if (status)
2919 goto err;
2920 } else {
39f1d94d
SP
2921 status = be_vf_eth_addr_config(adapter);
2922 if (status)
2923 goto err;
2924 }
f9449ab7 2925
11ac75ed 2926 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
2927 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2928 * Allow full available bandwidth
2929 */
2930 if (BE3_chip(adapter) && !old_vfs)
2931 be_cmd_set_qos(adapter, 1000, vf+1);
2932
2933 status = be_cmd_link_status_query(adapter, &lnk_speed,
2934 NULL, vf + 1);
2935 if (!status)
2936 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2937
2938 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2939 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2940 if (status)
2941 goto err;
2942 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2943
2944 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2945 }
b4c1df93
SP
2946
2947 if (!old_vfs) {
2948 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2949 if (status) {
2950 dev_err(dev, "SRIOV enable failed\n");
2951 adapter->num_vfs = 0;
2952 goto err;
2953 }
2954 }
f9449ab7
SP
2955 return 0;
2956err:
4c876616
SP
2957 dev_err(dev, "VF setup failed\n");
2958 be_vf_clear(adapter);
f9449ab7
SP
2959 return status;
2960}
2961
30128031
SP
2962static void be_setup_init(struct be_adapter *adapter)
2963{
2964 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2965 adapter->phy.link_speed = -1;
30128031
SP
2966 adapter->if_handle = -1;
2967 adapter->be3_native = false;
2968 adapter->promiscuous = false;
f25b119c
PR
2969 if (be_physfn(adapter))
2970 adapter->cmd_privileges = MAX_PRIVILEGES;
2971 else
2972 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2973}
2974
1578e777
PR
2975static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2976 bool *active_mac, u32 *pmac_id)
590c391d 2977{
1578e777 2978 int status = 0;
e5e1ee89 2979
1578e777
PR
2980 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2981 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2982 if (!lancer_chip(adapter) && !be_physfn(adapter))
2983 *active_mac = true;
2984 else
2985 *active_mac = false;
e5e1ee89 2986
1578e777
PR
2987 return status;
2988 }
e5e1ee89 2989
1578e777
PR
2990 if (lancer_chip(adapter)) {
2991 status = be_cmd_get_mac_from_list(adapter, mac,
2992 active_mac, pmac_id, 0);
2993 if (*active_mac) {
5ee4979b
SP
2994 status = be_cmd_mac_addr_query(adapter, mac, false,
2995 if_handle, *pmac_id);
1578e777
PR
2996 }
2997 } else if (be_physfn(adapter)) {
2998 /* For BE3, for PF get permanent MAC */
5ee4979b 2999 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 3000 *active_mac = false;
e5e1ee89 3001 } else {
1578e777 3002 /* For BE3, for VF get soft MAC assigned by PF*/
5ee4979b 3003 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
3004 if_handle, 0);
3005 *active_mac = true;
e5e1ee89 3006 }
590c391d
PR
3007 return status;
3008}
3009
abb93951
PR
3010static void be_get_resources(struct be_adapter *adapter)
3011{
4c876616
SP
3012 u16 dev_num_vfs;
3013 int pos, status;
abb93951 3014 bool profile_present = false;
a05f99db 3015 u16 txq_count = 0;
abb93951 3016
4c876616 3017 if (!BEx_chip(adapter)) {
abb93951 3018 status = be_cmd_get_func_config(adapter);
abb93951
PR
3019 if (!status)
3020 profile_present = true;
a05f99db
VV
3021 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3022 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
3023 }
3024
3025 if (profile_present) {
3026 /* Sanity fixes for Lancer */
3027 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3028 BE_UC_PMAC_COUNT);
3029 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3030 BE_NUM_VLANS_SUPPORTED);
3031 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3032 BE_MAX_MC);
3033 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3034 MAX_TX_QS);
3035 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3036 BE3_MAX_RSS_QS);
3037 adapter->max_event_queues = min_t(u16,
3038 adapter->max_event_queues,
3039 BE3_MAX_RSS_QS);
3040
3041 if (adapter->max_rss_queues &&
3042 adapter->max_rss_queues == adapter->max_rx_queues)
3043 adapter->max_rss_queues -= 1;
3044
3045 if (adapter->max_event_queues < adapter->max_rss_queues)
3046 adapter->max_rss_queues = adapter->max_event_queues;
3047
3048 } else {
3049 if (be_physfn(adapter))
3050 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3051 else
3052 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3053
3054 if (adapter->function_mode & FLEX10_MODE)
3055 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3056 else
3057 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3058
3059 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
3060 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3061 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3062 MAX_TX_QS);
abb93951
PR
3063 adapter->max_rss_queues = (adapter->be3_native) ?
3064 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3065 adapter->max_event_queues = BE3_MAX_RSS_QS;
3066
3067 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3068 BE_IF_FLAGS_BROADCAST |
3069 BE_IF_FLAGS_MULTICAST |
3070 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3071 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3072 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3073 BE_IF_FLAGS_PROMISCUOUS;
3074
3075 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3076 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3077 }
4c876616
SP
3078
3079 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3080 if (pos) {
3081 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3082 &dev_num_vfs);
3083 if (BE3_chip(adapter))
3084 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3085 adapter->dev_num_vfs = dev_num_vfs;
3086 }
abb93951
PR
3087}
3088
39f1d94d
SP
3089/* Routine to query per function resource limits */
3090static int be_get_config(struct be_adapter *adapter)
3091{
4c876616 3092 int status;
39f1d94d 3093
abb93951
PR
3094 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3095 &adapter->function_mode,
0ad3157e
VV
3096 &adapter->function_caps,
3097 &adapter->asic_rev);
abb93951
PR
3098 if (status)
3099 goto err;
3100
3101 be_get_resources(adapter);
3102
3103 /* primary mac needs 1 pmac entry */
3104 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3105 sizeof(u32), GFP_KERNEL);
3106 if (!adapter->pmac_id) {
3107 status = -ENOMEM;
3108 goto err;
3109 }
3110
abb93951
PR
3111err:
3112 return status;
39f1d94d
SP
3113}
3114
5fb379ee
SP
3115static int be_setup(struct be_adapter *adapter)
3116{
39f1d94d 3117 struct device *dev = &adapter->pdev->dev;
abb93951 3118 u32 en_flags;
a54769f5 3119 u32 tx_fc, rx_fc;
10ef9ab4 3120 int status;
ba343c77 3121 u8 mac[ETH_ALEN];
1578e777 3122 bool active_mac;
ba343c77 3123
30128031 3124 be_setup_init(adapter);
6b7c5b94 3125
abb93951
PR
3126 if (!lancer_chip(adapter))
3127 be_cmd_req_native_mode(adapter);
39f1d94d 3128
abb93951
PR
3129 status = be_get_config(adapter);
3130 if (status)
3131 goto err;
73d540f2 3132
c2bba3df
SK
3133 status = be_msix_enable(adapter);
3134 if (status)
3135 goto err;
10ef9ab4
SP
3136
3137 status = be_evt_queues_create(adapter);
3138 if (status)
a54769f5 3139 goto err;
6b7c5b94 3140
10ef9ab4
SP
3141 status = be_tx_cqs_create(adapter);
3142 if (status)
3143 goto err;
3144
3145 status = be_rx_cqs_create(adapter);
3146 if (status)
a54769f5 3147 goto err;
6b7c5b94 3148
f9449ab7 3149 status = be_mcc_queues_create(adapter);
10ef9ab4 3150 if (status)
a54769f5 3151 goto err;
6b7c5b94 3152
f25b119c
PR
3153 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3154 /* In UMC mode FW does not return right privileges.
3155 * Override with correct privilege equivalent to PF.
3156 */
3157 if (be_is_mc(adapter))
3158 adapter->cmd_privileges = MAX_PRIVILEGES;
3159
f9449ab7
SP
3160 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3161 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3162
abb93951 3163 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3164 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3165
abb93951 3166 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3167
abb93951 3168 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3169 &adapter->if_handle, 0);
5fb379ee 3170 if (status != 0)
a54769f5 3171 goto err;
6b7c5b94 3172
1578e777
PR
3173 memset(mac, 0, ETH_ALEN);
3174 active_mac = false;
3175 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3176 &active_mac, &adapter->pmac_id[0]);
3177 if (status != 0)
3178 goto err;
3179
3180 if (!active_mac) {
3181 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3182 &adapter->pmac_id[0], 0);
3183 if (status != 0)
3184 goto err;
3185 }
3186
3187 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3188 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3189 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3190 }
0dffc83e 3191
10ef9ab4
SP
3192 status = be_tx_qs_create(adapter);
3193 if (status)
3194 goto err;
3195
eeb65ced 3196 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3197
1d1e9a46 3198 if (adapter->vlans_added)
10329df8 3199 be_vid_config(adapter);
7ab8b0b4 3200
a54769f5 3201 be_set_rx_mode(adapter->netdev);
5fb379ee 3202
ddc3f5cb 3203 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3204
ddc3f5cb
AK
3205 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3206 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3207 adapter->rx_fc);
2dc1deb6 3208
b4c1df93 3209 if (be_physfn(adapter)) {
39f1d94d
SP
3210 if (adapter->dev_num_vfs)
3211 be_vf_setup(adapter);
3212 else
3213 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3214 }
3215
f25b119c
PR
3216 status = be_cmd_get_phy_info(adapter);
3217 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3218 adapter->phy.fc_autoneg = 1;
3219
191eb756
SP
3220 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3221 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3222 return 0;
a54769f5
SP
3223err:
3224 be_clear(adapter);
3225 return status;
3226}
6b7c5b94 3227
66268739
IV
3228#ifdef CONFIG_NET_POLL_CONTROLLER
3229static void be_netpoll(struct net_device *netdev)
3230{
3231 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3232 struct be_eq_obj *eqo;
66268739
IV
3233 int i;
3234
e49cc34f
SP
3235 for_all_evt_queues(adapter, eqo, i) {
3236 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3237 napi_schedule(&eqo->napi);
3238 }
10ef9ab4
SP
3239
3240 return;
66268739
IV
3241}
3242#endif
3243
84517482 3244#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3245char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3246
fa9a6fed 3247static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3248 const u8 *p, u32 img_start, int image_size,
3249 int hdr_size)
fa9a6fed
SB
3250{
3251 u32 crc_offset;
3252 u8 flashed_crc[4];
3253 int status;
3f0d4560
AK
3254
3255 crc_offset = hdr_size + img_start + image_size - 4;
3256
fa9a6fed 3257 p += crc_offset;
3f0d4560
AK
3258
3259 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3260 (image_size - 4));
fa9a6fed
SB
3261 if (status) {
3262 dev_err(&adapter->pdev->dev,
3263 "could not get crc from flash, not flashing redboot\n");
3264 return false;
3265 }
3266
3267 /*update redboot only if crc does not match*/
3268 if (!memcmp(flashed_crc, p, 4))
3269 return false;
3270 else
3271 return true;
fa9a6fed
SB
3272}
3273
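A worked example of the offset computed above: each flash image stores its 4-byte CRC as its final 4 bytes, so within the UFI file the CRC sits at hdr_size + img_start + image_size - 4. Illustrative numbers below:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hdr_size = 32, img_start = 0x20000,
			     image_size = 0x10000;	/* illustrative */

		printf("crc at file offset 0x%x\n",
		       hdr_size + img_start + image_size - 4);
		return 0;
	}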
306f1348
SP
3274static bool phy_flashing_required(struct be_adapter *adapter)
3275{
42f11cf2
AK
3276 return (adapter->phy.phy_type == TN_8022 &&
3277 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3278}
3279
c165541e
PR
3280static bool is_comp_in_ufi(struct be_adapter *adapter,
3281 struct flash_section_info *fsec, int type)
3282{
3283 int i = 0, img_type = 0;
3284 struct flash_section_info_g2 *fsec_g2 = NULL;
3285
ca34fe38 3286 if (BE2_chip(adapter))
c165541e
PR
3287 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3288
3289 for (i = 0; i < MAX_FLASH_COMP; i++) {
3290 if (fsec_g2)
3291 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3292 else
3293 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3294
3295 if (img_type == type)
3296 return true;
3297 }
3298 return false;
3299
3300}
3301
3302struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3303 int header_size,
3304 const struct firmware *fw)
3305{
3306 struct flash_section_info *fsec = NULL;
3307 const u8 *p = fw->data;
3308
3309 p += header_size;
3310 while (p < (fw->data + fw->size)) {
3311 fsec = (struct flash_section_info *)p;
3312 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3313 return fsec;
3314 p += 32;
3315 }
3316 return NULL;
3317}
3318
773a2d7c
PR
3319static int be_flash(struct be_adapter *adapter, const u8 *img,
3320 struct be_dma_mem *flash_cmd, int optype, int img_size)
3321{
3322 u32 total_bytes = 0, flash_op, num_bytes = 0;
3323 int status = 0;
3324 struct be_cmd_write_flashrom *req = flash_cmd->va;
3325
3326 total_bytes = img_size;
3327 while (total_bytes) {
3328 num_bytes = min_t(u32, 32*1024, total_bytes);
3329
3330 total_bytes -= num_bytes;
3331
3332 if (!total_bytes) {
3333 if (optype == OPTYPE_PHY_FW)
3334 flash_op = FLASHROM_OPER_PHY_FLASH;
3335 else
3336 flash_op = FLASHROM_OPER_FLASH;
3337 } else {
3338 if (optype == OPTYPE_PHY_FW)
3339 flash_op = FLASHROM_OPER_PHY_SAVE;
3340 else
3341 flash_op = FLASHROM_OPER_SAVE;
3342 }
3343
be716446 3344 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3345 img += num_bytes;
3346 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3347 flash_op, num_bytes);
3348 if (status) {
3349 if (status == ILLEGAL_IOCTL_REQ &&
3350 optype == OPTYPE_PHY_FW)
3351 break;
3352 dev_err(&adapter->pdev->dev,
3353 "cmd to write to flash rom failed.\n");
3354 return status;
3355 }
3356 }
3357 return 0;
3358}
3359
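A sketch of the chunking scheme above: the image is pushed 32KB at a time, every chunk but the last with a save-type op and the final chunk with the flash (commit) op. Sizes are illustrative and op names abbreviated:

	#include <stdio.h>

	int main(void)
	{
		unsigned int total = 100 * 1024, chunk = 32 * 1024;

		while (total) {
			unsigned int n = total < chunk ? total : chunk;

			total -= n;	/* last chunk triggers the commit op */
			printf("%u bytes via %s\n", n, total ? "SAVE" : "FLASH");
		}
		return 0;
	}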
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
                        const struct firmware *fw,
                        struct be_dma_mem *flash_cmd,
                        int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
        const u8 *p = fw->data;
        const struct flash_comp *pflashcomp;
        int num_comp, redboot;
        struct flash_section_info *fsec = NULL;

        struct flash_comp gen3_flash_types[] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
                { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
                { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
                { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
                { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
                { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
                { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
                        FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
                { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
                        FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
        };

        struct flash_comp gen2_flash_types[] = {
                { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
                { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
                        FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
                { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
                { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
                { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
                        FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
                { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
                { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
                { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
                        FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
        };

        if (BE3_chip(adapter)) {
                pflashcomp = gen3_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g3);
                num_comp = ARRAY_SIZE(gen3_flash_types);
        } else {
                pflashcomp = gen2_flash_types;
                filehdr_size = sizeof(struct flash_file_hdr_g2);
                num_comp = ARRAY_SIZE(gen2_flash_types);
        }

        /* Get flash section info */
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(&adapter->pdev->dev,
                        "Invalid cookie. UFI corrupted?\n");
                return -1;
        }
        for (i = 0; i < num_comp; i++) {
                if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
                        continue;

                /* Skip flashing the NCSI image on FW older than 3.102.148.0 */
                if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
                    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;

                if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
                    !phy_flashing_required(adapter))
                        continue;

                if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
                                pflashcomp[i].offset, pflashcomp[i].size,
                                filehdr_size + img_hdrs_size);
                        if (!redboot)
                                continue;
                }

                p = fw->data;
                p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
                if (p + pflashcomp[i].size > fw->data + fw->size)
                        return -1;

                status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
                                  pflashcomp[i].size);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
                                pflashcomp[i].img_type);
                        return status;
                }
        }
        return 0;
}

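/* Skyhawk UFIs are self-describing: instead of a hard-coded component
 * table, each entry in the flash section directory supplies the image
 * type, offset and size, which are mapped to a flashrom optype here.
 */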
static int be_flash_skyhawk(struct be_adapter *adapter,
                            const struct firmware *fw,
                            struct be_dma_mem *flash_cmd, int num_of_images)
{
        int status = 0, i, filehdr_size = 0;
        int img_offset, img_size, img_optype, redboot;
        int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
        const u8 *p = fw->data;
        struct flash_section_info *fsec = NULL;

        filehdr_size = sizeof(struct flash_file_hdr_g3);
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(&adapter->pdev->dev,
                        "Invalid cookie. UFI corrupted?\n");
                return -1;
        }

        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
                img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
                img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

                switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
                case IMAGE_FIRMWARE_iSCSI:
                        img_optype = OPTYPE_ISCSI_ACTIVE;
                        break;
                case IMAGE_BOOT_CODE:
                        img_optype = OPTYPE_REDBOOT;
                        break;
                case IMAGE_OPTION_ROM_ISCSI:
                        img_optype = OPTYPE_BIOS;
                        break;
                case IMAGE_OPTION_ROM_PXE:
                        img_optype = OPTYPE_PXE_BIOS;
                        break;
                case IMAGE_OPTION_ROM_FCoE:
                        img_optype = OPTYPE_FCOE_BIOS;
                        break;
                case IMAGE_FIRMWARE_BACKUP_iSCSI:
                        img_optype = OPTYPE_ISCSI_BACKUP;
                        break;
                case IMAGE_NCSI:
                        img_optype = OPTYPE_NCSI_FW;
                        break;
                default:
                        continue;
                }

                if (img_optype == OPTYPE_REDBOOT) {
                        redboot = be_flash_redboot(adapter, fw->data,
                                        img_offset, img_size,
                                        filehdr_size + img_hdrs_size);
                        if (!redboot)
                                continue;
                }

                p = fw->data;
                p += filehdr_size + img_offset + img_hdrs_size;
                if (p + img_size > fw->data + fw->size)
                        return -1;

                status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
                                le32_to_cpu(fsec->fsec_entry[i].type));
                        return status;
                }
        }
        return 0;
}

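/* Poll the SLIPORT physdev control register until its "in progress" bit
 * clears, giving the firmware up to 30 seconds to go idle before a reset.
 */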
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
        u32 reg_val;
        int status = 0, i;

        for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
                reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
                if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
                        break;

                ssleep(1);
        }

        if (i == SLIPORT_IDLE_TIMEOUT)
                status = -1;

        return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
        int status = 0;

        status = lancer_wait_idle(adapter);
        if (status)
                return status;

        iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
                  PHYSDEV_CONTROL_OFFSET);

        return status;
}

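/* Lancer firmware download: stream the image to the "/prg" object on the
 * adapter in 32KB WRITE_OBJECT chunks, then commit it with a zero-length
 * write. Depending on the reported change_status, activating the new
 * image may additionally require an in-band FW reset or a full reboot.
 */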
static int lancer_fw_download(struct be_adapter *adapter,
                              const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;
        u8 change_status;

        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW image not properly aligned. Length must be 4 byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                                 chunk_size, offset,
                                                 LANCER_FW_DOWNLOAD_LOCATION,
                                                 &data_written, &change_status,
                                                 &add_status);
                if (status)
                        break;

                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                                 0, offset,
                                                 LANCER_FW_DOWNLOAD_LOCATION,
                                                 &data_written, &change_status,
                                                 &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. Status code: 0x%x Additional status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        if (change_status == LANCER_FW_RESET_NEEDED) {
                status = lancer_fw_reset(adapter);
                if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Adapter busy for FW reset. New FW will not be active.\n");
                        goto lancer_fw_exit;
                }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
                dev_err(&adapter->pdev->dev,
                        "System reboot required for new FW to be active\n");
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}

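/* UFI files encode the adapter generation they target in the file
 * header's build string (and, for BE3-R, in asic_type_rev). Mapping the
 * header to a UFI type lets us reject an incompatible image, for example
 * a plain BE3 UFI on a BE3-R card, instead of flashing it.
 */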
#define UFI_TYPE2       2
#define UFI_TYPE3       3
#define UFI_TYPE3R      10
#define UFI_TYPE4       4
static int be_get_ufi_type(struct be_adapter *adapter,
                           struct flash_file_hdr_g3 *fhdr)
{
        if (fhdr == NULL)
                goto be_get_ufi_exit;

        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
                return UFI_TYPE4;
        else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
                if (fhdr->asic_type_rev == 0x10)
                        return UFI_TYPE3R;
                else
                        return UFI_TYPE3;
        } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
                return UFI_TYPE2;

be_get_ufi_exit:
        dev_err(&adapter->pdev->dev,
                "UFI and Interface are not compatible for flashing\n");
        return -1;
}

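/* Top-level flash path for BE2/BE3/Skyhawk UFIs: allocate one DMA-able
 * flashrom command buffer, classify the UFI, then walk the image headers
 * and hand each image to the generation-specific flashing routine.
 */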
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        const u8 *p;
        int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                goto be_fw_exit;
        }

        p = fw->data;
        fhdr3 = (struct flash_file_hdr_g3 *)p;

        ufi_type = be_get_ufi_type(adapter, fhdr3);

        num_imgs = le32_to_cpu(fhdr3->num_imgs);
        for (i = 0; i < num_imgs; i++) {
                img_hdr_ptr = (struct image_hdr *)(fw->data +
                                (sizeof(struct flash_file_hdr_g3) +
                                 i * sizeof(struct image_hdr)));
                if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
                        switch (ufi_type) {
                        case UFI_TYPE4:
                                status = be_flash_skyhawk(adapter, fw,
                                                          &flash_cmd, num_imgs);
                                break;
                        case UFI_TYPE3R:
                                status = be_flash_BEx(adapter, fw, &flash_cmd,
                                                      num_imgs);
                                break;
                        case UFI_TYPE3:
                                /* Do not flash this ufi on BE3-R cards */
                                if (adapter->asic_rev < 0x10)
                                        status = be_flash_BEx(adapter, fw,
                                                              &flash_cmd,
                                                              num_imgs);
                                else {
                                        status = -1;
                                        dev_err(&adapter->pdev->dev,
                                                "Can't load BE3 UFI on BE3R\n");
                                }
                        }
                }
        }

        if (ufi_type == UFI_TYPE2)
                status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
        else if (ufi_type == -1)
                status = -1;

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto be_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
        return status;
}

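/* Entry point for firmware flashing, reached via the driver's ethtool
 * flash hook (typically "ethtool -f <iface> <ufi_file>"). The interface
 * must be up; after a successful flash the cached FW version strings are
 * refreshed.
 */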
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
        const struct firmware *fw;
        int status;

        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -1;
        }

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        if (lancer_chip(adapter))
                status = lancer_fw_download(adapter, fw);
        else
                status = be_fw_download(adapter, fw);

        if (!status)
                be_cmd_get_fw_ver(adapter, adapter->fw_ver,
                                  adapter->fw_on_flash);

fw_exit:
        release_firmware(fw);
        return status;
}

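/* Standard net_device callbacks, plus the SR-IOV ndo_set_vf_* hooks that
 * let the PF configure MAC, VLAN and TX rate on behalf of its VFs.
 */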
static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_rx_mode,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_get_stats64        = be_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
#endif
};

static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        netdev->flags |= IFF_MULTICAST;

        netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

        netdev->netdev_ops = &be_netdev_ops;

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_evt_queues(adapter, eqo, i)
                netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                pci_iounmap(adapter->pdev, adapter->csr);
        if (adapter->db)
                pci_iounmap(adapter->pdev, adapter->db);
}

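/* Doorbell BAR selection: Lancer chips and virtual functions expose
 * doorbells on BAR 0, while BEx/Skyhawk physical functions use BAR 4
 * (BEx PFs additionally map the CSR space on BAR 2).
 */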
static int db_bar(struct be_adapter *adapter)
{
        if (lancer_chip(adapter) || !be_physfn(adapter))
                return 0;
        else
                return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
        if (skyhawk_chip(adapter)) {
                adapter->roce_db.size = 4096;
                adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
                                                              db_bar(adapter));
                adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
                                                               db_bar(adapter));
        }
        return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        u32 sli_intf;

        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
        adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                SLI_INTF_IF_TYPE_SHIFT;

        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
                if (adapter->csr == NULL)
                        return -ENOMEM;
        }

        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        be_roce_map_pci_bars(adapter);
        return 0;

pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->rx_filter;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

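/* Set up the control path: map PCI BARs, allocate the mailbox used for
 * early commands (it must be 16-byte aligned, hence the over-allocation
 * and PTR_ALIGN) and the RX filter command buffer, and initialize the
 * locks that serialize mailbox and MCC access.
 */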
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        u32 sli_intf;
        int status;

        pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
        adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
                                SLI_INTF_FAMILY_SHIFT;
        adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
                                           &rx_filter->dma,
                                           GFP_KERNEL | __GFP_ZERO);
        if (rx_filter->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (lancer_chip(adapter))
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else
                /* BE3 and Skyhawk */
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);

        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (cmd->va == NULL)
                return -1;
        return 0;
}

static void be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
        return (adapter->wol_cap & BE_WOL_CAP) &&
                !be_is_wol_excluded(adapter);
}

u32 be_get_fw_log_level(struct be_adapter *adapter)
{
        struct be_dma_mem extfat_cmd;
        struct be_fat_conf_params *cfgs;
        int status;
        u32 level = 0;
        int j;

        if (lancer_chip(adapter))
                return 0;

        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
                                             &extfat_cmd.dma);

        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
                        __func__);
                goto err;
        }

        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
        if (!status) {
                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
                                        sizeof(struct be_cmd_resp_hdr));
                for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
                        if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
        pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
                            extfat_cmd.dma);
err:
        return level;
}

static int be_get_initial_config(struct be_adapter *adapter)
{
        int status;
        u32 level;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        status = be_cmd_get_acpi_wol_cap(adapter);
        if (status) {
                /* in case of a failure to get WOL capabilities
                 * check the exclusion list to determine WOL capability */
                if (!be_is_wol_excluded(adapter))
                        adapter->wol_cap |= BE_WOL_CAP;
        }

        if (be_is_wol_supported(adapter))
                adapter->wol = true;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        level = be_get_fw_log_level(adapter);
        adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

        return 0;
}

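/* Recover a Lancer function after a SLIPORT error: wait for the firmware
 * to come back to a ready state, then tear the function down and bring it
 * up again from scratch.
 */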
static int lancer_recover_func(struct be_adapter *adapter)
{
        int status;

        status = lancer_test_and_set_rdy_state(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev))
                be_close(adapter->netdev);

        be_clear(adapter);

        adapter->hw_error = false;
        adapter->fw_timeout = false;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev)) {
                status = be_open(adapter->netdev);
                if (status)
                        goto err;
        }

        dev_info(&adapter->pdev->dev,
                 "Adapter SLIPORT recovery succeeded\n");
        return 0;
err:
        if (adapter->eeh_error)
                dev_err(&adapter->pdev->dev,
                        "Adapter SLIPORT recovery failed\n");

        return status;
}

static void be_func_recovery_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
        int status;

        be_detect_error(adapter);

        if (adapter->hw_error && lancer_chip(adapter)) {
                if (adapter->eeh_error)
                        goto out;

                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();

                status = lancer_recover_func(adapter);

                if (!status)
                        netif_device_attach(adapter->netdev);
        }

out:
        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
}

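/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh statistics and die temperature, replenish RX
 * queues that ran dry, and adapt interrupt coalescing (EQ delay) to the
 * observed load.
 */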
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        struct be_eq_obj *eqo;
        int i;

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

        for_all_evt_queues(adapter, eqo, i)
                be_eqd_update(adapter, eqo);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
        return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
        if (adapter->function_mode & FLEX10_MODE)
                return "FLEX10";
        else if (adapter->function_mode & VNIC_MODE)
                return "vNIC";
        else if (adapter->function_mode & UMC_ENABLED)
                return "UMC";
        else
                return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
        return be_physfn(adapter) ? "PF" : "VF";
}

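/* PCI probe: bring up one adapter function end to end. Rough order:
 * enable the PCI device, set the DMA mask, map BARs and allocate control
 * structures (be_ctrl_init), sync with firmware readiness, reset the
 * function if needed, allocate stats buffers, configure the function via
 * be_setup(), then register the net_device and start the recovery worker.
 */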
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (status < 0) {
                        dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
                        goto free_netdev;
                }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = pci_enable_pcie_error_reporting(pdev);
        if (status)
                dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;

                /* Wait for interrupts to quiesce after an FLR */
                msleep(100);
        }

        /* Allow interrupts for other ULPs running on NIC function */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

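/* Legacy PM hooks: suspend arms WoL if configured and tears the function
 * down; resume rebuilds it from scratch by re-running FW init and
 * be_setup() rather than restoring saved hardware state.
 */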
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

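/* PCI error (EEH) recovery: error_detected quiesces the function and, on
 * the first function only, waits out a possible in-progress FW flash
 * dump; slot_reset re-enables the device and waits for firmware
 * readiness; resume resets and rebuilds the function and reattaches the
 * netdev.
 */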
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, 0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);