/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

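/*
 * Each BE queue is one physically contiguous ring of 'len' fixed-size
 * entries carved out of a single coherent DMA allocation;
 * be_queue_alloc() above zeroes the ring (__GFP_ZERO) so the hardware
 * never sees stale valid bits from a previous use of the memory.
 */
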
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

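/*
 * be_intr_set() prefers the firmware command (be_cmd_intr_set) and only
 * falls back to toggling the host-interrupt bit in PCI config space via
 * be_reg_intr_set() above when that command fails, e.g. on firmware
 * that does not implement it.
 */
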
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

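/*
 * The EQ/CQ doorbells pack everything into one 32-bit write: the ring
 * id in the low bits, the count of processed (popped) entries, and the
 * rearm/clear-interrupt flags. Ringing with arm == false lets the
 * polling path acknowledge entries without re-enabling the event
 * interrupt until processing is done.
 */
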
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->dev_addr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

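/*
 * Worked example: if *acc is 0x0001FFF0 and the HW counter now reads
 * val = 0x0005, then val < lo(*acc) means the 16-bit counter wrapped;
 * newacc = hi(0x0001FFF0) + 0x0005 + 65536 = 0x00020005, i.e. the
 * 32-bit accumulator keeps counting past the 16-bit wrap.
 */
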
void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

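/*
 * Example: an skb with a linear head and two page frags needs
 * 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs -- already even, so no
 * dummy. With one frag the total would be 3, so on non-Lancer chips a
 * dummy WRB is appended because those ASICs expect an even number of
 * WRBs per request; Lancer has no such restriction.
 */
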
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

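/*
 * The remap above keeps the 12-bit VID intact and only rewrites the
 * 3-bit PCP field: if the OS-chosen priority is not set in
 * vlan_prio_bmap (the priorities this function may use), it is
 * replaced with the firmware-recommended priority.
 */
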
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

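/*
 * On a DMA mapping failure make_tx_wrbs() rewinds txq->head to the
 * first data WRB and walks the ring forward again, unmapping exactly
 * the fragments it had mapped ('copied' counts down to zero); only the
 * first WRB can be a single mapping, hence map_single is cleared after
 * the first unwind iteration.
 */
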
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
		vlan_tag = adapter->pvid;

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

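/*
 * The upper MTU bound is derived from the largest jumbo frame the HW
 * accepts minus the Ethernet header and FCS; e.g. assuming be.h defines
 * BE_MAX_JUMBO_FRAME_SIZE as 9018, the maximum allowed MTU works out to
 * 9018 - (14 + 4) = 9000 bytes.
 */
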
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

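/*
 * If the HW VLAN filter table overflows (more VIDs requested than
 * max_vlans, or the config command fails), be_vid_config() degrades
 * gracefully to VLAN promiscuous mode: all tagged traffic is accepted
 * in HW and filtering is left to the stack rather than dropping frames.
 */
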
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

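/*
 * Adaptive interrupt coalescing example: at 440,000 rx pkts/sec the
 * delay becomes (440000 / 110000) << 3 = 32 (then clamped to
 * [min_eqd, max_eqd]); below 220k pps the computed value (8 or 0) is
 * under 10 and is forced to 0, i.e. no interrupt delay at light load.
 */
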
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
				      ip_frag, compl);
}

1632static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1633{
1634 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1635 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1636 struct be_adapter *adapter = rxo->adapter;
6b7c5b94 1637
2e588f84
SP
 1638 /* For checking the valid bit it is OK to use either definition as the
1639 * valid bit is at the same position in both v0 and v1 Rx compl */
1640 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1641 return NULL;
6b7c5b94 1642
2e588f84
SP
1643 rmb();
1644 be_dws_le_to_cpu(compl, sizeof(*compl));
6b7c5b94 1645
2e588f84 1646 if (adapter->be3_native)
10ef9ab4 1647 be_parse_rx_compl_v1(compl, rxcp);
2e588f84 1648 else
10ef9ab4 1649 be_parse_rx_compl_v0(compl, rxcp);
6b7c5b94 1650
e38b1706
SK
1651 if (rxcp->ip_frag)
1652 rxcp->l4_csum = 0;
1653
15d72184
SP
1654 if (rxcp->vlanf) {
 1655 /* vlanf could be wrongly set in some cards.
 1656 * Ignore it if vtm is not set */
752961a1 1657 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
15d72184 1658 rxcp->vlanf = 0;
6b7c5b94 1659
15d72184 1660 if (!lancer_chip(adapter))
3c709f8f 1661 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
6b7c5b94 1662
939cf306 1663 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
3c709f8f 1664 !adapter->vlan_tag[rxcp->vlan_tag])
15d72184
SP
1665 rxcp->vlanf = 0;
1666 }
2e588f84
SP
1667
 1668 /* As the compl has been parsed, reset it; we won't touch it again */
1669 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
6b7c5b94 1670
3abcdeda 1671 queue_tail_inc(&rxo->cq);
6b7c5b94
SP
1672 return rxcp;
1673}
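/* Note the ordering above: the valid bit is tested first and rmb()
 * then fences the reads, so the rest of the DMA'd completion entry is
 * only read after the valid bit has been seen set. Clearing the valid
 * bit once the entry is parsed lets this CQ slot read as empty when
 * the queue wraps around to it again.
 */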
1674
1829b086 1675static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
6b7c5b94 1676{
6b7c5b94 1677 u32 order = get_order(size);
1829b086 1678
6b7c5b94 1679 if (order > 0)
1829b086
ED
1680 gfp |= __GFP_COMP;
1681 return alloc_pages(gfp, order);
6b7c5b94
SP
1682}
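/* Example: with the default rx_frag_size of 2048 and 4K pages,
 * get_order() above returns 0 and plain single pages are allocated.
 * __GFP_COMP matters only for order > 0: it makes the allocation a
 * compound page so that get_page()/put_page() on the head page
 * reference-count the whole multi-page unit.
 */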
1683
1684/*
 1685 * Allocate a page, split it into fragments of size rx_frag_size and post as
1686 * receive buffers to BE
1687 */
1829b086 1688static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1689{
3abcdeda 1690 struct be_adapter *adapter = rxo->adapter;
26d92f92 1691 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1692 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1693 struct page *pagep = NULL;
1694 struct be_eth_rx_d *rxd;
1695 u64 page_dmaaddr = 0, frag_dmaaddr;
1696 u32 posted, page_offset = 0;
1697
3abcdeda 1698 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1699 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1700 if (!pagep) {
1829b086 1701 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1702 if (unlikely(!pagep)) {
ac124ff9 1703 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1704 break;
1705 }
2b7bcebf
IV
1706 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1707 0, adapter->big_page_size,
1708 DMA_FROM_DEVICE);
6b7c5b94
SP
1709 page_info->page_offset = 0;
1710 } else {
1711 get_page(pagep);
1712 page_info->page_offset = page_offset + rx_frag_size;
1713 }
1714 page_offset = page_info->page_offset;
1715 page_info->page = pagep;
fac6da5b 1716 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1717 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1718
1719 rxd = queue_head_node(rxq);
1720 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1721 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1722
1723 /* Any space left in the current big page for another frag? */
1724 if ((page_offset + rx_frag_size + rx_frag_size) >
1725 adapter->big_page_size) {
1726 pagep = NULL;
1727 page_info->last_page_user = true;
1728 }
26d92f92
SP
1729
1730 prev_page_info = page_info;
1731 queue_head_inc(rxq);
10ef9ab4 1732 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1733 }
1734 if (pagep)
26d92f92 1735 prev_page_info->last_page_user = true;
6b7c5b94
SP
1736
1737 if (posted) {
6b7c5b94 1738 atomic_add(posted, &rxq->used);
8788fdc2 1739 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1740 } else if (atomic_read(&rxq->used) == 0) {
1741 /* Let be_worker replenish when memory is available */
3abcdeda 1742 rxo->rx_post_starved = true;
6b7c5b94 1743 }
6b7c5b94
SP
1744}
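/* With a 4K big page and the default rx_frag_size of 2048, the loop
 * above carves two RX descriptors out of every page: the first frag
 * maps the page, the second takes an extra reference via get_page()
 * and reuses the same DMA mapping at offset 2048. last_page_user
 * marks the final frag of a page; it is what get_rx_page_info() keys
 * on to know when the page's DMA mapping may be torn down.
 */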
1745
5fb379ee 1746static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
6b7c5b94 1747{
6b7c5b94
SP
1748 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1749
1750 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1751 return NULL;
1752
f3eb62d2 1753 rmb();
6b7c5b94
SP
1754 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1755
1756 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1757
1758 queue_tail_inc(tx_cq);
1759 return txcp;
1760}
1761
3c8def97
SP
1762static u16 be_tx_compl_process(struct be_adapter *adapter,
1763 struct be_tx_obj *txo, u16 last_index)
6b7c5b94 1764{
3c8def97 1765 struct be_queue_info *txq = &txo->q;
a73b796e 1766 struct be_eth_wrb *wrb;
3c8def97 1767 struct sk_buff **sent_skbs = txo->sent_skb_list;
6b7c5b94 1768 struct sk_buff *sent_skb;
ec43b1a6
SP
1769 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1770 bool unmap_skb_hdr = true;
6b7c5b94 1771
ec43b1a6 1772 sent_skb = sent_skbs[txq->tail];
6b7c5b94 1773 BUG_ON(!sent_skb);
ec43b1a6
SP
1774 sent_skbs[txq->tail] = NULL;
1775
1776 /* skip header wrb */
a73b796e 1777 queue_tail_inc(txq);
6b7c5b94 1778
ec43b1a6 1779 do {
6b7c5b94 1780 cur_index = txq->tail;
a73b796e 1781 wrb = queue_tail_node(txq);
2b7bcebf
IV
1782 unmap_tx_frag(&adapter->pdev->dev, wrb,
1783 (unmap_skb_hdr && skb_headlen(sent_skb)));
ec43b1a6
SP
1784 unmap_skb_hdr = false;
1785
6b7c5b94
SP
1786 num_wrbs++;
1787 queue_tail_inc(txq);
ec43b1a6 1788 } while (cur_index != last_index);
6b7c5b94 1789
6b7c5b94 1790 kfree_skb(sent_skb);
4d586b82 1791 return num_wrbs;
6b7c5b94
SP
1792}
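/* num_wrbs starts at 1 to account for the header wrb that precedes
 * every packet's data wrbs; the loop then walks the data wrbs up to
 * last_index (taken from the completion). The unmap_skb_hdr flag
 * tells unmap_tx_frag() which wrb covers the skb header so that
 * mapping is released exactly once.
 */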
1793
10ef9ab4
SP
1794/* Return the number of events in the event queue */
1795static inline int events_get(struct be_eq_obj *eqo)
859b1e4e 1796{
10ef9ab4
SP
1797 struct be_eq_entry *eqe;
1798 int num = 0;
859b1e4e 1799
10ef9ab4
SP
1800 do {
1801 eqe = queue_tail_node(&eqo->q);
1802 if (eqe->evt == 0)
1803 break;
859b1e4e 1804
10ef9ab4
SP
1805 rmb();
1806 eqe->evt = 0;
1807 num++;
1808 queue_tail_inc(&eqo->q);
1809 } while (true);
1810
1811 return num;
859b1e4e
SP
1812}
1813
10ef9ab4
SP
 1814 /* Leaves the EQ in disarmed state */
1815static void be_eq_clean(struct be_eq_obj *eqo)
859b1e4e 1816{
10ef9ab4 1817 int num = events_get(eqo);
859b1e4e 1818
10ef9ab4 1819 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
859b1e4e
SP
1820}
1821
10ef9ab4 1822static void be_rx_cq_clean(struct be_rx_obj *rxo)
6b7c5b94
SP
1823{
1824 struct be_rx_page_info *page_info;
3abcdeda
SP
1825 struct be_queue_info *rxq = &rxo->q;
1826 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 1827 struct be_rx_compl_info *rxcp;
d23e946c
SP
1828 struct be_adapter *adapter = rxo->adapter;
1829 int flush_wait = 0;
6b7c5b94
SP
1830 u16 tail;
1831
d23e946c
SP
1832 /* Consume pending rx completions.
1833 * Wait for the flush completion (identified by zero num_rcvd)
1834 * to arrive. Notify CQ even when there are no more CQ entries
1835 * for HW to flush partially coalesced CQ entries.
1836 * In Lancer, there is no need to wait for flush compl.
1837 */
1838 for (;;) {
1839 rxcp = be_rx_compl_get(rxo);
1840 if (rxcp == NULL) {
1841 if (lancer_chip(adapter))
1842 break;
1843
1844 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1845 dev_warn(&adapter->pdev->dev,
1846 "did not receive flush compl\n");
1847 break;
1848 }
1849 be_cq_notify(adapter, rx_cq->id, true, 0);
1850 mdelay(1);
1851 } else {
1852 be_rx_compl_discard(rxo, rxcp);
3f5dffe6 1853 be_cq_notify(adapter, rx_cq->id, false, 1);
d23e946c
SP
1854 if (rxcp->num_rcvd == 0)
1855 break;
1856 }
6b7c5b94
SP
1857 }
1858
d23e946c
SP
1859 /* After cleanup, leave the CQ in unarmed state */
1860 be_cq_notify(adapter, rx_cq->id, false, 0);
1861
1862 /* Then free posted rx buffers that were not used */
6b7c5b94 1863 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
cdab23b7 1864 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
10ef9ab4 1865 page_info = get_rx_page_info(rxo, tail);
6b7c5b94
SP
1866 put_page(page_info->page);
1867 memset(page_info, 0, sizeof(*page_info));
1868 }
1869 BUG_ON(atomic_read(&rxq->used));
482c9e79 1870 rxq->tail = rxq->head = 0;
6b7c5b94
SP
1871}
1872
0ae57bb3 1873static void be_tx_compl_clean(struct be_adapter *adapter)
6b7c5b94 1874{
0ae57bb3
SP
1875 struct be_tx_obj *txo;
1876 struct be_queue_info *txq;
a8e9179a 1877 struct be_eth_tx_compl *txcp;
4d586b82 1878 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
b03388d6
SP
1879 struct sk_buff *sent_skb;
1880 bool dummy_wrb;
0ae57bb3 1881 int i, pending_txqs;
a8e9179a
SP
1882
1883 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1884 do {
0ae57bb3
SP
1885 pending_txqs = adapter->num_tx_qs;
1886
1887 for_all_tx_queues(adapter, txo, i) {
1888 txq = &txo->q;
1889 while ((txcp = be_tx_compl_get(&txo->cq))) {
1890 end_idx =
1891 AMAP_GET_BITS(struct amap_eth_tx_compl,
1892 wrb_index, txcp);
1893 num_wrbs += be_tx_compl_process(adapter, txo,
1894 end_idx);
1895 cmpl++;
1896 }
1897 if (cmpl) {
1898 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1899 atomic_sub(num_wrbs, &txq->used);
1900 cmpl = 0;
1901 num_wrbs = 0;
1902 }
1903 if (atomic_read(&txq->used) == 0)
1904 pending_txqs--;
a8e9179a
SP
1905 }
1906
0ae57bb3 1907 if (pending_txqs == 0 || ++timeo > 200)
a8e9179a
SP
1908 break;
1909
1910 mdelay(1);
1911 } while (true);
1912
0ae57bb3
SP
1913 for_all_tx_queues(adapter, txo, i) {
1914 txq = &txo->q;
1915 if (atomic_read(&txq->used))
1916 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1917 atomic_read(&txq->used));
1918
1919 /* free posted tx for which compls will never arrive */
1920 while (atomic_read(&txq->used)) {
1921 sent_skb = txo->sent_skb_list[txq->tail];
1922 end_idx = txq->tail;
1923 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1924 &dummy_wrb);
1925 index_adv(&end_idx, num_wrbs - 1, txq->len);
1926 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1927 atomic_sub(num_wrbs, &txq->used);
1928 }
b03388d6 1929 }
6b7c5b94
SP
1930}
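/* The cleanup above is two-phase: first every TX CQ is polled for up
 * to ~200ms (200 iterations of mdelay(1)) in the hope that hardware
 * flushes the outstanding completions; any skbs whose completions
 * never arrive are then force-freed, with wrb_cnt_for_skb()
 * recomputing each skb's wrb count since no completion reports it.
 */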
1931
10ef9ab4
SP
1932static void be_evt_queues_destroy(struct be_adapter *adapter)
1933{
1934 struct be_eq_obj *eqo;
1935 int i;
1936
1937 for_all_evt_queues(adapter, eqo, i) {
19d59aa7
PR
1938 if (eqo->q.created) {
1939 be_eq_clean(eqo);
10ef9ab4 1940 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
19d59aa7 1941 }
10ef9ab4
SP
1942 be_queue_free(adapter, &eqo->q);
1943 }
1944}
1945
1946static int be_evt_queues_create(struct be_adapter *adapter)
1947{
1948 struct be_queue_info *eq;
1949 struct be_eq_obj *eqo;
1950 int i, rc;
1951
1952 adapter->num_evt_qs = num_irqs(adapter);
1953
1954 for_all_evt_queues(adapter, eqo, i) {
1955 eqo->adapter = adapter;
1956 eqo->tx_budget = BE_TX_BUDGET;
1957 eqo->idx = i;
1958 eqo->max_eqd = BE_MAX_EQD;
1959 eqo->enable_aic = true;
1960
1961 eq = &eqo->q;
1962 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1963 sizeof(struct be_eq_entry));
1964 if (rc)
1965 return rc;
1966
1967 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1968 if (rc)
1969 return rc;
1970 }
1cfafab9 1971 return 0;
10ef9ab4
SP
1972}
1973
5fb379ee
SP
1974static void be_mcc_queues_destroy(struct be_adapter *adapter)
1975{
1976 struct be_queue_info *q;
5fb379ee 1977
8788fdc2 1978 q = &adapter->mcc_obj.q;
5fb379ee 1979 if (q->created)
8788fdc2 1980 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
5fb379ee
SP
1981 be_queue_free(adapter, q);
1982
8788fdc2 1983 q = &adapter->mcc_obj.cq;
5fb379ee 1984 if (q->created)
8788fdc2 1985 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
5fb379ee
SP
1986 be_queue_free(adapter, q);
1987}
1988
1989/* Must be called only after TX qs are created as MCC shares TX EQ */
1990static int be_mcc_queues_create(struct be_adapter *adapter)
1991{
1992 struct be_queue_info *q, *cq;
5fb379ee 1993
8788fdc2 1994 cq = &adapter->mcc_obj.cq;
5fb379ee 1995 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1996 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1997 goto err;
1998
10ef9ab4
SP
1999 /* Use the default EQ for MCC completions */
2000 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
5fb379ee
SP
2001 goto mcc_cq_free;
2002
8788fdc2 2003 q = &adapter->mcc_obj.q;
5fb379ee
SP
2004 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2005 goto mcc_cq_destroy;
2006
8788fdc2 2007 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
2008 goto mcc_q_free;
2009
2010 return 0;
2011
2012mcc_q_free:
2013 be_queue_free(adapter, q);
2014mcc_cq_destroy:
8788fdc2 2015 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
2016mcc_cq_free:
2017 be_queue_free(adapter, cq);
2018err:
2019 return -1;
2020}
2021
6b7c5b94
SP
2022static void be_tx_queues_destroy(struct be_adapter *adapter)
2023{
2024 struct be_queue_info *q;
3c8def97
SP
2025 struct be_tx_obj *txo;
2026 u8 i;
6b7c5b94 2027
3c8def97
SP
2028 for_all_tx_queues(adapter, txo, i) {
2029 q = &txo->q;
2030 if (q->created)
2031 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2032 be_queue_free(adapter, q);
6b7c5b94 2033
3c8def97
SP
2034 q = &txo->cq;
2035 if (q->created)
2036 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2037 be_queue_free(adapter, q);
2038 }
6b7c5b94
SP
2039}
2040
dafc0fe3
SP
2041static int be_num_txqs_want(struct be_adapter *adapter)
2042{
abb93951
PR
2043 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2044 be_is_mc(adapter) ||
2045 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
ca34fe38 2046 BE2_chip(adapter))
dafc0fe3
SP
2047 return 1;
2048 else
abb93951 2049 return adapter->max_tx_queues;
dafc0fe3
SP
2050}
2051
10ef9ab4 2052static int be_tx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2053{
10ef9ab4
SP
2054 struct be_queue_info *cq, *eq;
2055 int status;
3c8def97
SP
2056 struct be_tx_obj *txo;
2057 u8 i;
6b7c5b94 2058
dafc0fe3 2059 adapter->num_tx_qs = be_num_txqs_want(adapter);
3bb62f4f
PR
2060 if (adapter->num_tx_qs != MAX_TX_QS) {
2061 rtnl_lock();
dafc0fe3
SP
2062 netif_set_real_num_tx_queues(adapter->netdev,
2063 adapter->num_tx_qs);
3bb62f4f
PR
2064 rtnl_unlock();
2065 }
dafc0fe3 2066
10ef9ab4
SP
2067 for_all_tx_queues(adapter, txo, i) {
2068 cq = &txo->cq;
2069 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2070 sizeof(struct be_eth_tx_compl));
2071 if (status)
2072 return status;
3c8def97 2073
10ef9ab4
SP
2074 /* If num_evt_qs is less than num_tx_qs, then more than
 2075 * one txq shares an eq
2076 */
2077 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2078 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2079 if (status)
2080 return status;
2081 }
2082 return 0;
2083}
6b7c5b94 2084
10ef9ab4
SP
2085static int be_tx_qs_create(struct be_adapter *adapter)
2086{
2087 struct be_tx_obj *txo;
2088 int i, status;
fe6d2a38 2089
3c8def97 2090 for_all_tx_queues(adapter, txo, i) {
10ef9ab4
SP
2091 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2092 sizeof(struct be_eth_wrb));
2093 if (status)
2094 return status;
6b7c5b94 2095
94d73aaa 2096 status = be_cmd_txq_create(adapter, txo);
10ef9ab4
SP
2097 if (status)
2098 return status;
3c8def97 2099 }
6b7c5b94 2100
d379142b
SP
2101 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2102 adapter->num_tx_qs);
10ef9ab4 2103 return 0;
6b7c5b94
SP
2104}
2105
10ef9ab4 2106static void be_rx_cqs_destroy(struct be_adapter *adapter)
6b7c5b94
SP
2107{
2108 struct be_queue_info *q;
3abcdeda
SP
2109 struct be_rx_obj *rxo;
2110 int i;
2111
2112 for_all_rx_queues(adapter, rxo, i) {
3abcdeda
SP
2113 q = &rxo->cq;
2114 if (q->created)
2115 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2116 be_queue_free(adapter, q);
ac6a0c4a
SP
2117 }
2118}
2119
10ef9ab4 2120static int be_rx_cqs_create(struct be_adapter *adapter)
6b7c5b94 2121{
10ef9ab4 2122 struct be_queue_info *eq, *cq;
3abcdeda
SP
2123 struct be_rx_obj *rxo;
2124 int rc, i;
6b7c5b94 2125
10ef9ab4
SP
2126 /* We'll create as many RSS rings as there are irqs.
2127 * But when there's only one irq there's no use creating RSS rings
2128 */
2129 adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2130 num_irqs(adapter) + 1 : 1;
7f640062
SP
2131 if (adapter->num_rx_qs != MAX_RX_QS) {
2132 rtnl_lock();
2133 netif_set_real_num_rx_queues(adapter->netdev,
2134 adapter->num_rx_qs);
2135 rtnl_unlock();
2136 }
ac6a0c4a 2137
6b7c5b94 2138 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3abcdeda
SP
2139 for_all_rx_queues(adapter, rxo, i) {
2140 rxo->adapter = adapter;
3abcdeda
SP
2141 cq = &rxo->cq;
2142 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2143 sizeof(struct be_eth_rx_compl));
2144 if (rc)
10ef9ab4 2145 return rc;
3abcdeda 2146
10ef9ab4
SP
2147 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2148 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3abcdeda 2149 if (rc)
10ef9ab4 2150 return rc;
3abcdeda 2151 }
6b7c5b94 2152
d379142b
SP
2153 dev_info(&adapter->pdev->dev,
2154 "created %d RSS queue(s) and 1 default RX queue\n",
2155 adapter->num_rx_qs - 1);
10ef9ab4 2156 return 0;
b628bde2
SP
2157}
2158
6b7c5b94
SP
2159static irqreturn_t be_intx(int irq, void *dev)
2160{
e49cc34f
SP
2161 struct be_eq_obj *eqo = dev;
2162 struct be_adapter *adapter = eqo->adapter;
2163 int num_evts = 0;
6b7c5b94 2164
d0b9cec3
SP
2165 /* IRQ is not expected when NAPI is scheduled as the EQ
2166 * will not be armed.
 2167 * But this can happen on Lancer INTx where it takes
 2168 * a while to de-assert INTx or in BE2 where occasionally
2169 * an interrupt may be raised even when EQ is unarmed.
2170 * If NAPI is already scheduled, then counting & notifying
2171 * events will orphan them.
e49cc34f 2172 */
d0b9cec3 2173 if (napi_schedule_prep(&eqo->napi)) {
e49cc34f 2174 num_evts = events_get(eqo);
d0b9cec3
SP
2175 __napi_schedule(&eqo->napi);
2176 if (num_evts)
2177 eqo->spurious_intr = 0;
2178 }
2179 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
e49cc34f 2180
d0b9cec3
SP
 2181 /* Return IRQ_HANDLED only for the first spurious intr
2182 * after a valid intr to stop the kernel from branding
2183 * this irq as a bad one!
e49cc34f 2184 */
d0b9cec3
SP
2185 if (num_evts || eqo->spurious_intr++ == 0)
2186 return IRQ_HANDLED;
2187 else
2188 return IRQ_NONE;
6b7c5b94
SP
2189}
2190
10ef9ab4 2191static irqreturn_t be_msix(int irq, void *dev)
6b7c5b94 2192{
10ef9ab4 2193 struct be_eq_obj *eqo = dev;
6b7c5b94 2194
0b545a62
SP
2195 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2196 napi_schedule(&eqo->napi);
6b7c5b94
SP
2197 return IRQ_HANDLED;
2198}
2199
2e588f84 2200static inline bool do_gro(struct be_rx_compl_info *rxcp)
6b7c5b94 2201{
e38b1706 2202 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
6b7c5b94
SP
2203}
2204
10ef9ab4
SP
2205static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2206 int budget)
6b7c5b94 2207{
3abcdeda
SP
2208 struct be_adapter *adapter = rxo->adapter;
2209 struct be_queue_info *rx_cq = &rxo->cq;
2e588f84 2210 struct be_rx_compl_info *rxcp;
6b7c5b94
SP
2211 u32 work_done;
2212
2213 for (work_done = 0; work_done < budget; work_done++) {
3abcdeda 2214 rxcp = be_rx_compl_get(rxo);
6b7c5b94
SP
2215 if (!rxcp)
2216 break;
2217
12004ae9
SP
 2218 /* Is it a flush compl that has no data? */
2219 if (unlikely(rxcp->num_rcvd == 0))
2220 goto loop_continue;
2221
2222 /* Discard compl with partial DMA Lancer B0 */
2223 if (unlikely(!rxcp->pkt_size)) {
10ef9ab4 2224 be_rx_compl_discard(rxo, rxcp);
12004ae9
SP
2225 goto loop_continue;
2226 }
2227
2228 /* On BE drop pkts that arrive due to imperfect filtering in
 2229 * promiscuous mode on some SKUs
2230 */
2231 if (unlikely(rxcp->port != adapter->port_num &&
2232 !lancer_chip(adapter))) {
10ef9ab4 2233 be_rx_compl_discard(rxo, rxcp);
12004ae9 2234 goto loop_continue;
64642811 2235 }
009dd872 2236
12004ae9 2237 if (do_gro(rxcp))
10ef9ab4 2238 be_rx_compl_process_gro(rxo, napi, rxcp);
12004ae9 2239 else
10ef9ab4 2240 be_rx_compl_process(rxo, rxcp);
12004ae9 2241loop_continue:
2e588f84 2242 be_rx_stats_update(rxo, rxcp);
6b7c5b94
SP
2243 }
2244
10ef9ab4
SP
2245 if (work_done) {
2246 be_cq_notify(adapter, rx_cq->id, true, work_done);
9372cacb 2247
10ef9ab4
SP
2248 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2249 be_post_rx_frags(rxo, GFP_ATOMIC);
6b7c5b94 2250 }
10ef9ab4 2251
6b7c5b94
SP
2252 return work_done;
2253}
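/* Refilling happens in NAPI (softirq) context, hence GFP_ATOMIC
 * above; the RX queue is only topped up once its fill level drops
 * below RX_FRAGS_REFILL_WM, so the page allocator is not hit on
 * every poll pass.
 */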
2254
10ef9ab4
SP
2255static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2256 int budget, int idx)
6b7c5b94 2257{
6b7c5b94 2258 struct be_eth_tx_compl *txcp;
10ef9ab4 2259 int num_wrbs = 0, work_done;
3c8def97 2260
10ef9ab4
SP
2261 for (work_done = 0; work_done < budget; work_done++) {
2262 txcp = be_tx_compl_get(&txo->cq);
2263 if (!txcp)
2264 break;
2265 num_wrbs += be_tx_compl_process(adapter, txo,
3c8def97
SP
2266 AMAP_GET_BITS(struct amap_eth_tx_compl,
2267 wrb_index, txcp));
10ef9ab4 2268 }
6b7c5b94 2269
10ef9ab4
SP
2270 if (work_done) {
2271 be_cq_notify(adapter, txo->cq.id, true, work_done);
2272 atomic_sub(num_wrbs, &txo->q.used);
3c8def97 2273
10ef9ab4
SP
2274 /* As Tx wrbs have been freed up, wake up netdev queue
2275 * if it was stopped due to lack of tx wrbs. */
2276 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2277 atomic_read(&txo->q.used) < txo->q.len / 2) {
2278 netif_wake_subqueue(adapter->netdev, idx);
3c8def97 2279 }
10ef9ab4
SP
2280
2281 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2282 tx_stats(txo)->tx_compl += work_done;
2283 u64_stats_update_end(&tx_stats(txo)->sync_compl);
6b7c5b94 2284 }
10ef9ab4
SP
2285 return (work_done < budget); /* Done */
2286}
6b7c5b94 2287
10ef9ab4
SP
2288int be_poll(struct napi_struct *napi, int budget)
2289{
2290 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2291 struct be_adapter *adapter = eqo->adapter;
0b545a62 2292 int max_work = 0, work, i, num_evts;
10ef9ab4 2293 bool tx_done;
f31e50a8 2294
0b545a62
SP
2295 num_evts = events_get(eqo);
2296
10ef9ab4
SP
2297 /* Process all TXQs serviced by this EQ */
2298 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2299 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2300 eqo->tx_budget, i);
2301 if (!tx_done)
2302 max_work = budget;
f31e50a8
SP
2303 }
2304
10ef9ab4
SP
2305 /* This loop will iterate twice for EQ0 in which
 2306 * completions of the last RXQ (default one) are also processed.
 2307 * For other EQs the loop iterates only once
2308 */
2309 for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2310 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2311 max_work = max(work, max_work);
2312 }
6b7c5b94 2313
10ef9ab4
SP
2314 if (is_mcc_eqo(eqo))
2315 be_process_mcc(adapter);
93c86700 2316
10ef9ab4
SP
2317 if (max_work < budget) {
2318 napi_complete(napi);
0b545a62 2319 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
10ef9ab4
SP
2320 } else {
2321 /* As we'll continue in polling mode, count and clear events */
0b545a62 2322 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
93c86700 2323 }
10ef9ab4 2324 return max_work;
6b7c5b94
SP
2325}
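/* NAPI contract as used above: a return value below the budget means
 * "done" and napi_complete() has re-armed the EQ so a fresh interrupt
 * can fire. Returning max_work == budget (forced whenever a TXQ used
 * up its tx_budget) keeps the EQ unarmed and be_poll() will be called
 * again. Either way the events counted earlier are acked through
 * be_eq_notify() so the EQ cannot overflow.
 */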
2326
f67ef7ba 2327void be_detect_error(struct be_adapter *adapter)
7c185276 2328{
e1cfb67a
PR
2329 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2330 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
7c185276
AK
2331 u32 i;
2332
d23e946c 2333 if (be_hw_error(adapter))
72f02485
SP
2334 return;
2335
e1cfb67a
PR
2336 if (lancer_chip(adapter)) {
2337 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2338 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2339 sliport_err1 = ioread32(adapter->db +
2340 SLIPORT_ERROR1_OFFSET);
2341 sliport_err2 = ioread32(adapter->db +
2342 SLIPORT_ERROR2_OFFSET);
2343 }
2344 } else {
2345 pci_read_config_dword(adapter->pdev,
2346 PCICFG_UE_STATUS_LOW, &ue_lo);
2347 pci_read_config_dword(adapter->pdev,
2348 PCICFG_UE_STATUS_HIGH, &ue_hi);
2349 pci_read_config_dword(adapter->pdev,
2350 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2351 pci_read_config_dword(adapter->pdev,
2352 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2353
f67ef7ba
PR
2354 ue_lo = (ue_lo & ~ue_lo_mask);
2355 ue_hi = (ue_hi & ~ue_hi_mask);
e1cfb67a 2356 }
7c185276 2357
1451ae6e
AK
2358 /* On certain platforms BE hardware can indicate spurious UEs.
2359 * Allow the h/w to stop working completely in case of a real UE.
 2360 * Hence hw_error is not set on UE detection.
2361 */
2362 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
f67ef7ba 2363 adapter->hw_error = true;
434b3648 2364 dev_err(&adapter->pdev->dev,
f67ef7ba
PR
2365 "Error detected in the card\n");
2366 }
2367
2368 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2369 dev_err(&adapter->pdev->dev,
2370 "ERR: sliport status 0x%x\n", sliport_status);
2371 dev_err(&adapter->pdev->dev,
2372 "ERR: sliport error1 0x%x\n", sliport_err1);
2373 dev_err(&adapter->pdev->dev,
2374 "ERR: sliport error2 0x%x\n", sliport_err2);
d053de91
AK
2375 }
2376
e1cfb67a
PR
2377 if (ue_lo) {
2378 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2379 if (ue_lo & 1)
7c185276
AK
2380 dev_err(&adapter->pdev->dev,
2381 "UE: %s bit set\n", ue_status_low_desc[i]);
2382 }
2383 }
f67ef7ba 2384
e1cfb67a
PR
2385 if (ue_hi) {
2386 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2387 if (ue_hi & 1)
7c185276
AK
2388 dev_err(&adapter->pdev->dev,
2389 "UE: %s bit set\n", ue_status_hi_desc[i]);
2390 }
2391 }
2392
2393}
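/* On BE-x chips the UE status registers are qualified by their mask
 * registers (ue & ~mask), so only unmasked error bits are reported;
 * each surviving bit indexes ue_status_low_desc[]/ue_status_hi_desc[]
 * to name the failing hardware block. On Lancer the SLIPORT status
 * register plays the equivalent role.
 */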
2394
8d56ff11
SP
2395static void be_msix_disable(struct be_adapter *adapter)
2396{
ac6a0c4a 2397 if (msix_enabled(adapter)) {
8d56ff11 2398 pci_disable_msix(adapter->pdev);
ac6a0c4a 2399 adapter->num_msix_vec = 0;
3abcdeda
SP
2400 }
2401}
2402
10ef9ab4
SP
2403static uint be_num_rss_want(struct be_adapter *adapter)
2404{
30e80b55 2405 u32 num = 0;
abb93951 2406
10ef9ab4 2407 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
abb93951
PR
2408 (lancer_chip(adapter) ||
2409 (!sriov_want(adapter) && be_physfn(adapter)))) {
2410 num = adapter->max_rss_queues;
30e80b55
YM
2411 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2412 }
2413 return num;
10ef9ab4
SP
2414}
2415
c2bba3df 2416static int be_msix_enable(struct be_adapter *adapter)
6b7c5b94 2417{
10ef9ab4 2418#define BE_MIN_MSIX_VECTORS 1
045508a8 2419 int i, status, num_vec, num_roce_vec = 0;
d379142b 2420 struct device *dev = &adapter->pdev->dev;
6b7c5b94 2421
10ef9ab4
SP
2422 /* If RSS queues are not used, need a vec for default RX Q */
2423 num_vec = min(be_num_rss_want(adapter), num_online_cpus());
045508a8
PP
2424 if (be_roce_supported(adapter)) {
2425 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2426 (num_online_cpus() + 1));
2427 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2428 num_vec += num_roce_vec;
2429 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2430 }
10ef9ab4 2431 num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
3abcdeda 2432
ac6a0c4a 2433 for (i = 0; i < num_vec; i++)
6b7c5b94
SP
2434 adapter->msix_entries[i].entry = i;
2435
ac6a0c4a 2436 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
3abcdeda
SP
2437 if (status == 0) {
2438 goto done;
2439 } else if (status >= BE_MIN_MSIX_VECTORS) {
ac6a0c4a 2440 num_vec = status;
c2bba3df
SK
2441 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2442 num_vec);
2443 if (!status)
3abcdeda 2444 goto done;
3abcdeda 2445 }
d379142b
SP
2446
2447 dev_warn(dev, "MSIx enable failed\n");
c2bba3df
SK
2448 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2449 if (!be_physfn(adapter))
2450 return status;
2451 return 0;
3abcdeda 2452done:
045508a8
PP
2453 if (be_roce_supported(adapter)) {
2454 if (num_vec > num_roce_vec) {
2455 adapter->num_msix_vec = num_vec - num_roce_vec;
2456 adapter->num_msix_roce_vec =
2457 num_vec - adapter->num_msix_vec;
2458 } else {
2459 adapter->num_msix_vec = num_vec;
2460 adapter->num_msix_roce_vec = 0;
2461 }
2462 } else
2463 adapter->num_msix_vec = num_vec;
d379142b 2464 dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
c2bba3df 2465 return 0;
6b7c5b94
SP
2466}
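/* pci_enable_msix(), as used here, returns 0 on success or a positive
 * count when fewer vectors are available; the retry above simply asks
 * again with that smaller count. Only PFs may fall back to INTx;
 * VFs have no INTx, so for them a failed enable fails the probe.
 */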
2467
fe6d2a38 2468static inline int be_msix_vec_get(struct be_adapter *adapter,
10ef9ab4 2469 struct be_eq_obj *eqo)
b628bde2 2470{
10ef9ab4 2471 return adapter->msix_entries[eqo->idx].vector;
b628bde2 2472}
6b7c5b94 2473
b628bde2
SP
2474static int be_msix_register(struct be_adapter *adapter)
2475{
10ef9ab4
SP
2476 struct net_device *netdev = adapter->netdev;
2477 struct be_eq_obj *eqo;
2478 int status, i, vec;
6b7c5b94 2479
10ef9ab4
SP
2480 for_all_evt_queues(adapter, eqo, i) {
2481 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2482 vec = be_msix_vec_get(adapter, eqo);
2483 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3abcdeda
SP
2484 if (status)
2485 goto err_msix;
2486 }
b628bde2 2487
6b7c5b94 2488 return 0;
3abcdeda 2489err_msix:
10ef9ab4
SP
2490 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2491 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2492 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2493 status);
ac6a0c4a 2494 be_msix_disable(adapter);
6b7c5b94
SP
2495 return status;
2496}
2497
2498static int be_irq_register(struct be_adapter *adapter)
2499{
2500 struct net_device *netdev = adapter->netdev;
2501 int status;
2502
ac6a0c4a 2503 if (msix_enabled(adapter)) {
6b7c5b94
SP
2504 status = be_msix_register(adapter);
2505 if (status == 0)
2506 goto done;
ba343c77
SB
2507 /* INTx is not supported for VF */
2508 if (!be_physfn(adapter))
2509 return status;
6b7c5b94
SP
2510 }
2511
e49cc34f 2512 /* INTx: only the first EQ is used */
6b7c5b94
SP
2513 netdev->irq = adapter->pdev->irq;
2514 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
e49cc34f 2515 &adapter->eq_obj[0]);
6b7c5b94
SP
2516 if (status) {
2517 dev_err(&adapter->pdev->dev,
2518 "INTx request IRQ failed - err %d\n", status);
2519 return status;
2520 }
2521done:
2522 adapter->isr_registered = true;
2523 return 0;
2524}
2525
2526static void be_irq_unregister(struct be_adapter *adapter)
2527{
2528 struct net_device *netdev = adapter->netdev;
10ef9ab4 2529 struct be_eq_obj *eqo;
3abcdeda 2530 int i;
6b7c5b94
SP
2531
2532 if (!adapter->isr_registered)
2533 return;
2534
2535 /* INTx */
ac6a0c4a 2536 if (!msix_enabled(adapter)) {
e49cc34f 2537 free_irq(netdev->irq, &adapter->eq_obj[0]);
6b7c5b94
SP
2538 goto done;
2539 }
2540
2541 /* MSIx */
10ef9ab4
SP
2542 for_all_evt_queues(adapter, eqo, i)
2543 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3abcdeda 2544
6b7c5b94
SP
2545done:
2546 adapter->isr_registered = false;
6b7c5b94
SP
2547}
2548
10ef9ab4 2549static void be_rx_qs_destroy(struct be_adapter *adapter)
482c9e79
SP
2550{
2551 struct be_queue_info *q;
2552 struct be_rx_obj *rxo;
2553 int i;
2554
2555 for_all_rx_queues(adapter, rxo, i) {
2556 q = &rxo->q;
2557 if (q->created) {
2558 be_cmd_rxq_destroy(adapter, q);
10ef9ab4 2559 be_rx_cq_clean(rxo);
482c9e79 2560 }
10ef9ab4 2561 be_queue_free(adapter, q);
482c9e79
SP
2562 }
2563}
2564
889cd4b2
SP
2565static int be_close(struct net_device *netdev)
2566{
2567 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4
SP
2568 struct be_eq_obj *eqo;
2569 int i;
889cd4b2 2570
045508a8
PP
2571 be_roce_dev_close(adapter);
2572
04d3d624
SK
2573 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2574 for_all_evt_queues(adapter, eqo, i)
2575 napi_disable(&eqo->napi);
2576 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2577 }
a323d9bf
SP
2578
2579 be_async_mcc_disable(adapter);
2580
2581 /* Wait for all pending tx completions to arrive so that
2582 * all tx skbs are freed.
2583 */
2584 be_tx_compl_clean(adapter);
fba87559 2585 netif_tx_disable(netdev);
a323d9bf
SP
2586
2587 be_rx_qs_destroy(adapter);
2588
2589 for_all_evt_queues(adapter, eqo, i) {
10ef9ab4
SP
2590 if (msix_enabled(adapter))
2591 synchronize_irq(be_msix_vec_get(adapter, eqo));
2592 else
2593 synchronize_irq(netdev->irq);
2594 be_eq_clean(eqo);
63fcb27f
PR
2595 }
2596
889cd4b2
SP
2597 be_irq_unregister(adapter);
2598
482c9e79
SP
2599 return 0;
2600}
2601
10ef9ab4 2602static int be_rx_qs_create(struct be_adapter *adapter)
482c9e79
SP
2603{
2604 struct be_rx_obj *rxo;
e9008ee9
PR
2605 int rc, i, j;
2606 u8 rsstable[128];
482c9e79
SP
2607
2608 for_all_rx_queues(adapter, rxo, i) {
10ef9ab4
SP
2609 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2610 sizeof(struct be_eth_rx_d));
2611 if (rc)
2612 return rc;
2613 }
2614
2615 /* The FW would like the default RXQ to be created first */
2616 rxo = default_rxo(adapter);
2617 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2618 adapter->if_handle, false, &rxo->rss_id);
2619 if (rc)
2620 return rc;
2621
2622 for_all_rss_queues(adapter, rxo, i) {
482c9e79 2623 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
10ef9ab4
SP
2624 rx_frag_size, adapter->if_handle,
2625 true, &rxo->rss_id);
482c9e79
SP
2626 if (rc)
2627 return rc;
2628 }
2629
2630 if (be_multi_rxq(adapter)) {
e9008ee9
PR
2631 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2632 for_all_rss_queues(adapter, rxo, i) {
2633 if ((j + i) >= 128)
2634 break;
2635 rsstable[j + i] = rxo->rss_id;
2636 }
2637 }
594ad54a
SR
2638 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2639 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2640
2641 if (!BEx_chip(adapter))
2642 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2643 RSS_ENABLE_UDP_IPV6;
2644
2645 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2646 128);
2647 if (rc) {
2648 adapter->rss_flags = 0;
482c9e79 2649 return rc;
594ad54a 2650 }
482c9e79
SP
2651 }
2652
2653 /* First time posting */
10ef9ab4 2654 for_all_rx_queues(adapter, rxo, i)
482c9e79 2655 be_post_rx_frags(rxo, GFP_KERNEL);
889cd4b2
SP
2656 return 0;
2657}
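/* The 128-entry indirection table built above round-robins the RSS
 * ring ids, e.g. with 4 RSS rings:
 *	rsstable[] = { id0, id1, id2, id3, id0, id1, ... }
 * so the RSS hash spreads flows across the rings in rotation. The
 * default (non-RSS) queue is deliberately left out of the table.
 */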
2658
6b7c5b94
SP
2659static int be_open(struct net_device *netdev)
2660{
2661 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 2662 struct be_eq_obj *eqo;
3abcdeda 2663 struct be_rx_obj *rxo;
10ef9ab4 2664 struct be_tx_obj *txo;
b236916a 2665 u8 link_status;
3abcdeda 2666 int status, i;
5fb379ee 2667
10ef9ab4 2668 status = be_rx_qs_create(adapter);
482c9e79
SP
2669 if (status)
2670 goto err;
2671
c2bba3df
SK
2672 status = be_irq_register(adapter);
2673 if (status)
2674 goto err;
5fb379ee 2675
10ef9ab4 2676 for_all_rx_queues(adapter, rxo, i)
3abcdeda 2677 be_cq_notify(adapter, rxo->cq.id, true, 0);
5fb379ee 2678
10ef9ab4
SP
2679 for_all_tx_queues(adapter, txo, i)
2680 be_cq_notify(adapter, txo->cq.id, true, 0);
2681
7a1e9b20
SP
2682 be_async_mcc_enable(adapter);
2683
10ef9ab4
SP
2684 for_all_evt_queues(adapter, eqo, i) {
2685 napi_enable(&eqo->napi);
2686 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2687 }
04d3d624 2688 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
10ef9ab4 2689
323ff71e 2690 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
b236916a
AK
2691 if (!status)
2692 be_link_status_update(adapter, link_status);
2693
fba87559 2694 netif_tx_start_all_queues(netdev);
045508a8 2695 be_roce_dev_open(adapter);
889cd4b2
SP
2696 return 0;
2697err:
2698 be_close(adapter->netdev);
2699 return -EIO;
5fb379ee
SP
2700}
2701
71d8d1b5
AK
2702static int be_setup_wol(struct be_adapter *adapter, bool enable)
2703{
2704 struct be_dma_mem cmd;
2705 int status = 0;
2706 u8 mac[ETH_ALEN];
2707
2708 memset(mac, 0, ETH_ALEN);
2709
2710 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2b7bcebf 2711 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1f9061d2 2712 GFP_KERNEL | __GFP_ZERO);
71d8d1b5
AK
2713 if (cmd.va == NULL)
2714 return -1;
71d8d1b5
AK
2715
2716 if (enable) {
2717 status = pci_write_config_dword(adapter->pdev,
2718 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2719 if (status) {
2720 dev_err(&adapter->pdev->dev,
2381a55c 2721 "Could not enable Wake-on-lan\n");
2b7bcebf
IV
2722 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2723 cmd.dma);
71d8d1b5
AK
2724 return status;
2725 }
2726 status = be_cmd_enable_magic_wol(adapter,
2727 adapter->netdev->dev_addr, &cmd);
2728 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2729 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2730 } else {
2731 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2732 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2733 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2734 }
2735
2b7bcebf 2736 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
71d8d1b5
AK
2737 return status;
2738}
2739
6d87f5c3
AK
2740/*
2741 * Generate a seed MAC address from the PF MAC Address using jhash.
 2742 * MAC addresses for VFs are assigned incrementally starting from the seed.
2743 * These addresses are programmed in the ASIC by the PF and the VF driver
2744 * queries for the MAC address during its probe.
2745 */
4c876616 2746static int be_vf_eth_addr_config(struct be_adapter *adapter)
6d87f5c3 2747{
f9449ab7 2748 u32 vf;
3abcdeda 2749 int status = 0;
6d87f5c3 2750 u8 mac[ETH_ALEN];
11ac75ed 2751 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2752
2753 be_vf_eth_addr_generate(adapter, mac);
2754
11ac75ed 2755 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2756 if (lancer_chip(adapter)) {
2757 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2758 } else {
2759 status = be_cmd_pmac_add(adapter, mac,
11ac75ed
SP
2760 vf_cfg->if_handle,
2761 &vf_cfg->pmac_id, vf + 1);
590c391d
PR
2762 }
2763
6d87f5c3
AK
2764 if (status)
2765 dev_err(&adapter->pdev->dev,
590c391d 2766 "Mac address assignment failed for VF %d\n", vf);
6d87f5c3 2767 else
11ac75ed 2768 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
6d87f5c3
AK
2769
2770 mac[5] += 1;
2771 }
2772 return status;
2773}
2774
4c876616
SP
2775static int be_vfs_mac_query(struct be_adapter *adapter)
2776{
2777 int status, vf;
2778 u8 mac[ETH_ALEN];
2779 struct be_vf_cfg *vf_cfg;
2780 bool active;
2781
2782 for_all_vfs(adapter, vf_cfg, vf) {
2783 be_cmd_get_mac_from_list(adapter, mac, &active,
2784 &vf_cfg->pmac_id, 0);
2785
2786 status = be_cmd_mac_addr_query(adapter, mac, false,
2787 vf_cfg->if_handle, 0);
2788 if (status)
2789 return status;
2790 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2791 }
2792 return 0;
2793}
2794
f9449ab7 2795static void be_vf_clear(struct be_adapter *adapter)
6d87f5c3 2796{
11ac75ed 2797 struct be_vf_cfg *vf_cfg;
6d87f5c3
AK
2798 u32 vf;
2799
39f1d94d 2800 if (be_find_vfs(adapter, ASSIGNED)) {
4c876616
SP
2801 dev_warn(&adapter->pdev->dev,
2802 "VFs are assigned to VMs: not disabling VFs\n");
39f1d94d
SP
2803 goto done;
2804 }
2805
b4c1df93
SP
2806 pci_disable_sriov(adapter->pdev);
2807
11ac75ed 2808 for_all_vfs(adapter, vf_cfg, vf) {
590c391d
PR
2809 if (lancer_chip(adapter))
2810 be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2811 else
11ac75ed
SP
2812 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2813 vf_cfg->pmac_id, vf + 1);
f9449ab7 2814
11ac75ed
SP
2815 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2816 }
39f1d94d
SP
2817done:
2818 kfree(adapter->vf_cfg);
2819 adapter->num_vfs = 0;
6d87f5c3
AK
2820}
2821
a54769f5
SP
2822static int be_clear(struct be_adapter *adapter)
2823{
fbc13f01
AK
2824 int i = 1;
2825
191eb756
SP
2826 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2827 cancel_delayed_work_sync(&adapter->work);
2828 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2829 }
2830
11ac75ed 2831 if (sriov_enabled(adapter))
f9449ab7
SP
2832 be_vf_clear(adapter);
2833
fbc13f01
AK
2834 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2835 be_cmd_pmac_del(adapter, adapter->if_handle,
2836 adapter->pmac_id[i], 0);
2837
f9449ab7 2838 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
a54769f5
SP
2839
2840 be_mcc_queues_destroy(adapter);
10ef9ab4 2841 be_rx_cqs_destroy(adapter);
a54769f5 2842 be_tx_queues_destroy(adapter);
10ef9ab4 2843 be_evt_queues_destroy(adapter);
a54769f5 2844
abb93951
PR
2845 kfree(adapter->pmac_id);
2846 adapter->pmac_id = NULL;
2847
10ef9ab4 2848 be_msix_disable(adapter);
a54769f5
SP
2849 return 0;
2850}
2851
4c876616 2852static int be_vfs_if_create(struct be_adapter *adapter)
abb93951 2853{
4c876616
SP
2854 struct be_vf_cfg *vf_cfg;
2855 u32 cap_flags, en_flags, vf;
abb93951
PR
2856 int status;
2857
4c876616
SP
2858 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2859 BE_IF_FLAGS_MULTICAST;
abb93951 2860
4c876616
SP
2861 for_all_vfs(adapter, vf_cfg, vf) {
2862 if (!BE3_chip(adapter))
a05f99db
VV
2863 be_cmd_get_profile_config(adapter, &cap_flags,
2864 NULL, vf + 1);
4c876616
SP
2865
2866 /* If a FW profile exists, then cap_flags are updated */
2867 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2868 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2869 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2870 &vf_cfg->if_handle, vf + 1);
2871 if (status)
2872 goto err;
2873 }
2874err:
2875 return status;
abb93951
PR
2876}
2877
39f1d94d 2878static int be_vf_setup_init(struct be_adapter *adapter)
30128031 2879{
11ac75ed 2880 struct be_vf_cfg *vf_cfg;
30128031
SP
2881 int vf;
2882
39f1d94d
SP
2883 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2884 GFP_KERNEL);
2885 if (!adapter->vf_cfg)
2886 return -ENOMEM;
2887
11ac75ed
SP
2888 for_all_vfs(adapter, vf_cfg, vf) {
2889 vf_cfg->if_handle = -1;
2890 vf_cfg->pmac_id = -1;
30128031 2891 }
39f1d94d 2892 return 0;
30128031
SP
2893}
2894
f9449ab7
SP
2895static int be_vf_setup(struct be_adapter *adapter)
2896{
11ac75ed 2897 struct be_vf_cfg *vf_cfg;
f1f3ee1b 2898 u16 def_vlan, lnk_speed;
4c876616
SP
2899 int status, old_vfs, vf;
2900 struct device *dev = &adapter->pdev->dev;
39f1d94d 2901
4c876616
SP
2902 old_vfs = be_find_vfs(adapter, ENABLED);
2903 if (old_vfs) {
2904 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2905 if (old_vfs != num_vfs)
2906 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2907 adapter->num_vfs = old_vfs;
39f1d94d 2908 } else {
4c876616
SP
2909 if (num_vfs > adapter->dev_num_vfs)
2910 dev_info(dev, "Device supports %d VFs and not %d\n",
2911 adapter->dev_num_vfs, num_vfs);
2912 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
b4c1df93 2913 if (!adapter->num_vfs)
4c876616 2914 return 0;
39f1d94d
SP
2915 }
2916
2917 status = be_vf_setup_init(adapter);
2918 if (status)
2919 goto err;
30128031 2920
4c876616
SP
2921 if (old_vfs) {
2922 for_all_vfs(adapter, vf_cfg, vf) {
2923 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2924 if (status)
2925 goto err;
2926 }
2927 } else {
2928 status = be_vfs_if_create(adapter);
f9449ab7
SP
2929 if (status)
2930 goto err;
f9449ab7
SP
2931 }
2932
4c876616
SP
2933 if (old_vfs) {
2934 status = be_vfs_mac_query(adapter);
2935 if (status)
2936 goto err;
2937 } else {
39f1d94d
SP
2938 status = be_vf_eth_addr_config(adapter);
2939 if (status)
2940 goto err;
2941 }
f9449ab7 2942
11ac75ed 2943 for_all_vfs(adapter, vf_cfg, vf) {
4c876616
SP
 2944 /* BE3 FW, by default, caps VF TX-rate to 100Mbps.
2945 * Allow full available bandwidth
2946 */
2947 if (BE3_chip(adapter) && !old_vfs)
2948 be_cmd_set_qos(adapter, 1000, vf+1);
2949
2950 status = be_cmd_link_status_query(adapter, &lnk_speed,
2951 NULL, vf + 1);
2952 if (!status)
2953 vf_cfg->tx_rate = lnk_speed;
f1f3ee1b
AK
2954
2955 status = be_cmd_get_hsw_config(adapter, &def_vlan,
4c876616 2956 vf + 1, vf_cfg->if_handle);
f1f3ee1b
AK
2957 if (status)
2958 goto err;
2959 vf_cfg->def_vid = def_vlan;
dcf7ebba
PR
2960
2961 be_cmd_enable_vf(adapter, vf + 1);
f9449ab7 2962 }
b4c1df93
SP
2963
2964 if (!old_vfs) {
2965 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2966 if (status) {
2967 dev_err(dev, "SRIOV enable failed\n");
2968 adapter->num_vfs = 0;
2969 goto err;
2970 }
2971 }
f9449ab7
SP
2972 return 0;
2973err:
4c876616
SP
2974 dev_err(dev, "VF setup failed\n");
2975 be_vf_clear(adapter);
f9449ab7
SP
2976 return status;
2977}
2978
30128031
SP
2979static void be_setup_init(struct be_adapter *adapter)
2980{
2981 adapter->vlan_prio_bmap = 0xff;
42f11cf2 2982 adapter->phy.link_speed = -1;
30128031
SP
2983 adapter->if_handle = -1;
2984 adapter->be3_native = false;
2985 adapter->promiscuous = false;
f25b119c
PR
2986 if (be_physfn(adapter))
2987 adapter->cmd_privileges = MAX_PRIVILEGES;
2988 else
2989 adapter->cmd_privileges = MIN_PRIVILEGES;
30128031
SP
2990}
2991
1578e777
PR
2992static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2993 bool *active_mac, u32 *pmac_id)
590c391d 2994{
1578e777 2995 int status = 0;
e5e1ee89 2996
1578e777
PR
2997 if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2998 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2999 if (!lancer_chip(adapter) && !be_physfn(adapter))
3000 *active_mac = true;
3001 else
3002 *active_mac = false;
e5e1ee89 3003
1578e777
PR
3004 return status;
3005 }
e5e1ee89 3006
1578e777
PR
3007 if (lancer_chip(adapter)) {
3008 status = be_cmd_get_mac_from_list(adapter, mac,
3009 active_mac, pmac_id, 0);
3010 if (*active_mac) {
5ee4979b
SP
3011 status = be_cmd_mac_addr_query(adapter, mac, false,
3012 if_handle, *pmac_id);
1578e777
PR
3013 }
3014 } else if (be_physfn(adapter)) {
3015 /* For BE3, for PF get permanent MAC */
5ee4979b 3016 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
1578e777 3017 *active_mac = false;
e5e1ee89 3018 } else {
1578e777 3019 /* For BE3, for VF get soft MAC assigned by PF */
5ee4979b 3020 status = be_cmd_mac_addr_query(adapter, mac, false,
1578e777
PR
3021 if_handle, 0);
3022 *active_mac = true;
e5e1ee89 3023 }
590c391d
PR
3024 return status;
3025}
3026
abb93951
PR
3027static void be_get_resources(struct be_adapter *adapter)
3028{
4c876616
SP
3029 u16 dev_num_vfs;
3030 int pos, status;
abb93951 3031 bool profile_present = false;
a05f99db 3032 u16 txq_count = 0;
abb93951 3033
4c876616 3034 if (!BEx_chip(adapter)) {
abb93951 3035 status = be_cmd_get_func_config(adapter);
abb93951
PR
3036 if (!status)
3037 profile_present = true;
a05f99db
VV
3038 } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3039 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
abb93951
PR
3040 }
3041
3042 if (profile_present) {
3043 /* Sanity fixes for Lancer */
3044 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3045 BE_UC_PMAC_COUNT);
3046 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3047 BE_NUM_VLANS_SUPPORTED);
3048 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3049 BE_MAX_MC);
3050 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3051 MAX_TX_QS);
3052 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3053 BE3_MAX_RSS_QS);
3054 adapter->max_event_queues = min_t(u16,
3055 adapter->max_event_queues,
3056 BE3_MAX_RSS_QS);
3057
3058 if (adapter->max_rss_queues &&
3059 adapter->max_rss_queues == adapter->max_rx_queues)
3060 adapter->max_rss_queues -= 1;
3061
3062 if (adapter->max_event_queues < adapter->max_rss_queues)
3063 adapter->max_rss_queues = adapter->max_event_queues;
3064
3065 } else {
3066 if (be_physfn(adapter))
3067 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3068 else
3069 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3070
3071 if (adapter->function_mode & FLEX10_MODE)
3072 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3073 else
3074 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3075
3076 adapter->max_mcast_mac = BE_MAX_MC;
a05f99db
VV
3077 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3078 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3079 MAX_TX_QS);
abb93951
PR
3080 adapter->max_rss_queues = (adapter->be3_native) ?
3081 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3082 adapter->max_event_queues = BE3_MAX_RSS_QS;
3083
3084 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3085 BE_IF_FLAGS_BROADCAST |
3086 BE_IF_FLAGS_MULTICAST |
3087 BE_IF_FLAGS_PASS_L3L4_ERRORS |
3088 BE_IF_FLAGS_MCAST_PROMISCUOUS |
3089 BE_IF_FLAGS_VLAN_PROMISCUOUS |
3090 BE_IF_FLAGS_PROMISCUOUS;
3091
3092 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3093 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3094 }
4c876616
SP
3095
3096 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3097 if (pos) {
3098 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3099 &dev_num_vfs);
3100 if (BE3_chip(adapter))
3101 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3102 adapter->dev_num_vfs = dev_num_vfs;
3103 }
abb93951
PR
3104}
3105
39f1d94d
SP
3106/* Routine to query per function resource limits */
3107static int be_get_config(struct be_adapter *adapter)
3108{
4c876616 3109 int status;
39f1d94d 3110
abb93951
PR
3111 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3112 &adapter->function_mode,
0ad3157e
VV
3113 &adapter->function_caps,
3114 &adapter->asic_rev);
abb93951
PR
3115 if (status)
3116 goto err;
3117
3118 be_get_resources(adapter);
3119
3120 /* primary mac needs 1 pmac entry */
3121 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3122 sizeof(u32), GFP_KERNEL);
3123 if (!adapter->pmac_id) {
3124 status = -ENOMEM;
3125 goto err;
3126 }
3127
abb93951
PR
3128err:
3129 return status;
39f1d94d
SP
3130}
3131
5fb379ee
SP
3132static int be_setup(struct be_adapter *adapter)
3133{
39f1d94d 3134 struct device *dev = &adapter->pdev->dev;
abb93951 3135 u32 en_flags;
a54769f5 3136 u32 tx_fc, rx_fc;
10ef9ab4 3137 int status;
ba343c77 3138 u8 mac[ETH_ALEN];
1578e777 3139 bool active_mac;
ba343c77 3140
30128031 3141 be_setup_init(adapter);
6b7c5b94 3142
abb93951
PR
3143 if (!lancer_chip(adapter))
3144 be_cmd_req_native_mode(adapter);
39f1d94d 3145
abb93951
PR
3146 status = be_get_config(adapter);
3147 if (status)
3148 goto err;
73d540f2 3149
c2bba3df
SK
3150 status = be_msix_enable(adapter);
3151 if (status)
3152 goto err;
10ef9ab4
SP
3153
3154 status = be_evt_queues_create(adapter);
3155 if (status)
a54769f5 3156 goto err;
6b7c5b94 3157
10ef9ab4
SP
3158 status = be_tx_cqs_create(adapter);
3159 if (status)
3160 goto err;
3161
3162 status = be_rx_cqs_create(adapter);
3163 if (status)
a54769f5 3164 goto err;
6b7c5b94 3165
f9449ab7 3166 status = be_mcc_queues_create(adapter);
10ef9ab4 3167 if (status)
a54769f5 3168 goto err;
6b7c5b94 3169
f25b119c
PR
3170 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
 3171 /* In UMC mode FW does not return the right privileges.
3172 * Override with correct privilege equivalent to PF.
3173 */
3174 if (be_is_mc(adapter))
3175 adapter->cmd_privileges = MAX_PRIVILEGES;
3176
f9449ab7
SP
3177 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3178 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
5d5adb93 3179
abb93951 3180 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
f9449ab7 3181 en_flags |= BE_IF_FLAGS_RSS;
1578e777 3182
abb93951 3183 en_flags = en_flags & adapter->if_cap_flags;
0b13fb45 3184
abb93951 3185 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
1578e777 3186 &adapter->if_handle, 0);
5fb379ee 3187 if (status != 0)
a54769f5 3188 goto err;
6b7c5b94 3189
1578e777
PR
3190 memset(mac, 0, ETH_ALEN);
3191 active_mac = false;
3192 status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3193 &active_mac, &adapter->pmac_id[0]);
3194 if (status != 0)
3195 goto err;
3196
3197 if (!active_mac) {
3198 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3199 &adapter->pmac_id[0], 0);
3200 if (status != 0)
3201 goto err;
3202 }
3203
3204 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3205 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3206 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
f9449ab7 3207 }
0dffc83e 3208
10ef9ab4
SP
3209 status = be_tx_qs_create(adapter);
3210 if (status)
3211 goto err;
3212
eeb65ced 3213 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
5a56eb10 3214
1d1e9a46 3215 if (adapter->vlans_added)
10329df8 3216 be_vid_config(adapter);
7ab8b0b4 3217
a54769f5 3218 be_set_rx_mode(adapter->netdev);
5fb379ee 3219
ddc3f5cb 3220 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
590c391d 3221
ddc3f5cb
AK
3222 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3223 be_cmd_set_flow_control(adapter, adapter->tx_fc,
a54769f5 3224 adapter->rx_fc);
2dc1deb6 3225
b4c1df93 3226 if (be_physfn(adapter)) {
39f1d94d
SP
3227 if (adapter->dev_num_vfs)
3228 be_vf_setup(adapter);
3229 else
3230 dev_warn(dev, "device doesn't support SRIOV\n");
f9449ab7
SP
3231 }
3232
f25b119c
PR
3233 status = be_cmd_get_phy_info(adapter);
3234 if (!status && be_pause_supported(adapter))
42f11cf2
AK
3235 adapter->phy.fc_autoneg = 1;
3236
191eb756
SP
3237 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3238 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
f9449ab7 3239 return 0;
a54769f5
SP
3240err:
3241 be_clear(adapter);
3242 return status;
3243}
6b7c5b94 3244
66268739
IV
3245#ifdef CONFIG_NET_POLL_CONTROLLER
3246static void be_netpoll(struct net_device *netdev)
3247{
3248 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3249 struct be_eq_obj *eqo;
66268739
IV
3250 int i;
3251
e49cc34f
SP
3252 for_all_evt_queues(adapter, eqo, i) {
3253 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3254 napi_schedule(&eqo->napi);
3255 }
10ef9ab4
SP
3256
3257 return;
66268739
IV
3258}
3259#endif
3260
84517482 3261#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
c165541e
PR
3262char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3263
fa9a6fed 3264static bool be_flash_redboot(struct be_adapter *adapter,
3f0d4560
AK
3265 const u8 *p, u32 img_start, int image_size,
3266 int hdr_size)
fa9a6fed
SB
3267{
3268 u32 crc_offset;
3269 u8 flashed_crc[4];
3270 int status;
3f0d4560
AK
3271
3272 crc_offset = hdr_size + img_start + image_size - 4;
3273
fa9a6fed 3274 p += crc_offset;
3f0d4560
AK
3275
3276 status = be_cmd_get_flash_crc(adapter, flashed_crc,
f510fc64 3277 (image_size - 4));
fa9a6fed
SB
3278 if (status) {
3279 dev_err(&adapter->pdev->dev,
3280 "could not get crc from flash, not flashing redboot\n");
3281 return false;
3282 }
3283
 3284 /* update redboot only if crc does not match */
3285 if (!memcmp(flashed_crc, p, 4))
3286 return false;
3287 else
3288 return true;
fa9a6fed
SB
3289}
3290
306f1348
SP
3291static bool phy_flashing_required(struct be_adapter *adapter)
3292{
42f11cf2
AK
3293 return (adapter->phy.phy_type == TN_8022 &&
3294 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
306f1348
SP
3295}
3296
c165541e
PR
3297static bool is_comp_in_ufi(struct be_adapter *adapter,
3298 struct flash_section_info *fsec, int type)
3299{
3300 int i = 0, img_type = 0;
3301 struct flash_section_info_g2 *fsec_g2 = NULL;
3302
ca34fe38 3303 if (BE2_chip(adapter))
c165541e
PR
3304 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3305
3306 for (i = 0; i < MAX_FLASH_COMP; i++) {
3307 if (fsec_g2)
3308 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3309 else
3310 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3311
3312 if (img_type == type)
3313 return true;
3314 }
3315 return false;
3316
3317}
3318
3319struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3320 int header_size,
3321 const struct firmware *fw)
3322{
3323 struct flash_section_info *fsec = NULL;
3324 const u8 *p = fw->data;
3325
3326 p += header_size;
3327 while (p < (fw->data + fw->size)) {
3328 fsec = (struct flash_section_info *)p;
3329 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3330 return fsec;
3331 p += 32;
3332 }
3333 return NULL;
3334}
3335
773a2d7c
PR
3336static int be_flash(struct be_adapter *adapter, const u8 *img,
3337 struct be_dma_mem *flash_cmd, int optype, int img_size)
3338{
3339 u32 total_bytes = 0, flash_op, num_bytes = 0;
3340 int status = 0;
3341 struct be_cmd_write_flashrom *req = flash_cmd->va;
3342
3343 total_bytes = img_size;
3344 while (total_bytes) {
3345 num_bytes = min_t(u32, 32*1024, total_bytes);
3346
3347 total_bytes -= num_bytes;
3348
3349 if (!total_bytes) {
3350 if (optype == OPTYPE_PHY_FW)
3351 flash_op = FLASHROM_OPER_PHY_FLASH;
3352 else
3353 flash_op = FLASHROM_OPER_FLASH;
3354 } else {
3355 if (optype == OPTYPE_PHY_FW)
3356 flash_op = FLASHROM_OPER_PHY_SAVE;
3357 else
3358 flash_op = FLASHROM_OPER_SAVE;
3359 }
3360
be716446 3361 memcpy(req->data_buf, img, num_bytes);
773a2d7c
PR
3362 img += num_bytes;
3363 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3364 flash_op, num_bytes);
3365 if (status) {
3366 if (status == ILLEGAL_IOCTL_REQ &&
3367 optype == OPTYPE_PHY_FW)
3368 break;
3369 dev_err(&adapter->pdev->dev,
3370 "cmd to write to flash rom failed.\n");
3371 return status;
3372 }
3373 }
3374 return 0;
3375}
3376
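/* BE2/BE3 UFIs are flashed from a fixed component table: each entry maps a
 * UFI image type to its flash offset, op-type and maximum size (gen2 table
 * for BE2, gen3 for BE3). Components absent from the UFI, NCSI images on
 * pre-3.102.148.0 firmware, PHY images that the port doesn't need, and
 * unchanged boot code are all skipped.
 */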
0ad3157e 3377/* For BE2, BE3 and BE3-R */
ca34fe38 3378static int be_flash_BEx(struct be_adapter *adapter,
c165541e
PR
3379 const struct firmware *fw,
3380 struct be_dma_mem *flash_cmd,
3381 int num_of_images)
3f0d4560 3382
84517482 3383{
3f0d4560 3384 int status = 0, i, filehdr_size = 0;
c165541e 3385 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
84517482 3386 const u8 *p = fw->data;
215faf9c 3387 const struct flash_comp *pflashcomp;
773a2d7c 3388 int num_comp, redboot;
c165541e
PR
3389 struct flash_section_info *fsec = NULL;
3390
3391 struct flash_comp gen3_flash_types[] = {
3392 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3393 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3394 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3395 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3396 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3397 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3398 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3399 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3400 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3401 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3402 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3403 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3404 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3405 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3406 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3407 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3408 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3409 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3410 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3411 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3f0d4560 3412 };
c165541e
PR
3413
3414 struct flash_comp gen2_flash_types[] = {
3415 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3416 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3417 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3418 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3419 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3420 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3421 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3422 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3423 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3424 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3425 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3426 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3427 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3428 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3429 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3430 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3f0d4560
AK
3431 };
3432
ca34fe38 3433 if (BE3_chip(adapter)) {
3f0d4560
AK
3434 pflashcomp = gen3_flash_types;
3435 filehdr_size = sizeof(struct flash_file_hdr_g3);
215faf9c 3436 num_comp = ARRAY_SIZE(gen3_flash_types);
3f0d4560
AK
3437 } else {
3438 pflashcomp = gen2_flash_types;
3439 filehdr_size = sizeof(struct flash_file_hdr_g2);
215faf9c 3440 num_comp = ARRAY_SIZE(gen2_flash_types);
84517482 3441 }
ca34fe38 3442
c165541e
PR
3443 /* Get flash section info*/
3444 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3445 if (!fsec) {
3446 dev_err(&adapter->pdev->dev,
3447 "Invalid Cookie. UFI corrupted ?\n");
3448 return -1;
3449 }
9fe96934 3450 for (i = 0; i < num_comp; i++) {
c165541e 3451 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
9fe96934 3452 continue;
c165541e
PR
3453
3454 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3455 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3456 continue;
3457
773a2d7c
PR
3458 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3459 !phy_flashing_required(adapter))
306f1348 3460 continue;
c165541e 3461
773a2d7c
PR
3462 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3463 redboot = be_flash_redboot(adapter, fw->data,
3464 pflashcomp[i].offset, pflashcomp[i].size,
3465 filehdr_size + img_hdrs_size);
3466 if (!redboot)
3467 continue;
3468 }
c165541e 3469
3f0d4560 3470 p = fw->data;
c165541e 3471 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
306f1348
SP
3472 if (p + pflashcomp[i].size > fw->data + fw->size)
3473 return -1;
773a2d7c
PR
3474
3475 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3476 pflashcomp[i].size);
3477 if (status) {
3478 dev_err(&adapter->pdev->dev,
3479 "Flashing section type %d failed.\n",
3480 pflashcomp[i].img_type);
3481 return status;
84517482 3482 }
84517482 3483 }
84517482
AK
3484 return 0;
3485}
3486
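/* Skyhawk UFIs are self-describing: rather than using a fixed table, each
 * flash section entry carries its own offset, size and image type, which
 * is mapped to an op-type below; unrecognized image types are skipped.
 */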
773a2d7c
PR
3487static int be_flash_skyhawk(struct be_adapter *adapter,
3488 const struct firmware *fw,
3489 struct be_dma_mem *flash_cmd, int num_of_images)
3f0d4560 3490{
773a2d7c
PR
3491 int status = 0, i, filehdr_size = 0;
3492 int img_offset, img_size, img_optype, redboot;
3493 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3494 const u8 *p = fw->data;
3495 struct flash_section_info *fsec = NULL;
3496
3497 filehdr_size = sizeof(struct flash_file_hdr_g3);
3498 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3499 if (!fsec) {
3500 dev_err(&adapter->pdev->dev,
3501 "Invalid Cookie. UFI corrupted ?\n");
3502 return -1;
3503 }
3504
3505 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3506 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3507 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3508
3509 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3510 case IMAGE_FIRMWARE_iSCSI:
3511 img_optype = OPTYPE_ISCSI_ACTIVE;
3512 break;
3513 case IMAGE_BOOT_CODE:
3514 img_optype = OPTYPE_REDBOOT;
3515 break;
3516 case IMAGE_OPTION_ROM_ISCSI:
3517 img_optype = OPTYPE_BIOS;
3518 break;
3519 case IMAGE_OPTION_ROM_PXE:
3520 img_optype = OPTYPE_PXE_BIOS;
3521 break;
3522 case IMAGE_OPTION_ROM_FCoE:
3523 img_optype = OPTYPE_FCOE_BIOS;
3524 break;
3525 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3526 img_optype = OPTYPE_ISCSI_BACKUP;
3527 break;
3528 case IMAGE_NCSI:
3529 img_optype = OPTYPE_NCSI_FW;
3530 break;
3531 default:
3532 continue;
3533 }
3534
3535 if (img_optype == OPTYPE_REDBOOT) {
3536 redboot = be_flash_redboot(adapter, fw->data,
3537 img_offset, img_size,
3538 filehdr_size + img_hdrs_size);
3539 if (!redboot)
3540 continue;
3541 }
3542
3543 p = fw->data;
3544 p += filehdr_size + img_offset + img_hdrs_size;
3545 if (p + img_size > fw->data + fw->size)
3546 return -1;
3547
3548 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3549 if (status) {
3550 dev_err(&adapter->pdev->dev,
3551 "Flashing section type %d failed.\n",
3552 le32_to_cpu(fsec->fsec_entry[i].type));
3553 return status;
3554 }
3555 }
3556 return 0;
3f0d4560
AK
3557}
3558
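/* Lancer download path: stream the image to the "/prg" object in 32KB
 * chunks via write_object, then issue a zero-length write to commit it.
 * Depending on what was flashed, firmware may then require a function
 * reset (issued here) or a full system reboot (left to the operator).
 */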
485bf569
SN
3559static int lancer_fw_download(struct be_adapter *adapter,
3560 const struct firmware *fw)
84517482 3561{
485bf569
SN
3562#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3563#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
84517482 3564 struct be_dma_mem flash_cmd;
485bf569
SN
3565 const u8 *data_ptr = NULL;
3566 u8 *dest_image_ptr = NULL;
3567 size_t image_size = 0;
3568 u32 chunk_size = 0;
3569 u32 data_written = 0;
3570 u32 offset = 0;
3571 int status = 0;
3572 u8 add_status = 0;
f67ef7ba 3573 u8 change_status;
84517482 3574
485bf569 3575 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
d9efd2af 3576 dev_err(&adapter->pdev->dev,
485bf569
SN
3577 "FW Image not properly aligned. "
3578 "Length must be 4 byte aligned.\n");
3579 status = -EINVAL;
3580 goto lancer_fw_exit;
d9efd2af
SB
3581 }
3582
485bf569
SN
3583 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3584 + LANCER_FW_DOWNLOAD_CHUNK;
3585 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
d0320f75 3586 &flash_cmd.dma, GFP_KERNEL);
485bf569
SN
3587 if (!flash_cmd.va) {
3588 status = -ENOMEM;
485bf569
SN
3589 goto lancer_fw_exit;
3590 }
84517482 3591
485bf569
SN
3592 dest_image_ptr = flash_cmd.va +
3593 sizeof(struct lancer_cmd_req_write_object);
3594 image_size = fw->size;
3595 data_ptr = fw->data;
3596
3597 while (image_size) {
3598 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3599
3600 /* Copy the image chunk content. */
3601 memcpy(dest_image_ptr, data_ptr, chunk_size);
3602
3603 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3604 chunk_size, offset,
3605 LANCER_FW_DOWNLOAD_LOCATION,
3606 &data_written, &change_status,
3607 &add_status);
485bf569
SN
3608 if (status)
3609 break;
3610
3611 offset += data_written;
3612 data_ptr += data_written;
3613 image_size -= data_written;
3614 }
3615
3616 if (!status) {
3617 /* Commit the FW written */
3618 status = lancer_cmd_write_object(adapter, &flash_cmd,
f67ef7ba
PR
3619 0, offset,
3620 LANCER_FW_DOWNLOAD_LOCATION,
3621 &data_written, &change_status,
3622 &add_status);
485bf569
SN
3623 }
3624
3625 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3626 flash_cmd.dma);
3627 if (status) {
3628 dev_err(&adapter->pdev->dev,
3629 "Firmware load error. "
3630 "Status code: 0x%x Additional Status: 0x%x\n",
3631 status, add_status);
3632 goto lancer_fw_exit;
3633 }
3634
f67ef7ba 3635 if (change_status == LANCER_FW_RESET_NEEDED) {
5c510811
SK
3636 status = lancer_physdev_ctrl(adapter,
3637 PHYSDEV_CONTROL_FW_RESET_MASK);
f67ef7ba
PR
3638 if (status) {
3639 dev_err(&adapter->pdev->dev,
3640 "Adapter busy for FW reset.\n"
3641 "New FW will not be active.\n");
3642 goto lancer_fw_exit;
3643 }
3644 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3645 dev_err(&adapter->pdev->dev,
3646 "System reboot required for new FW"
3647 " to be active\n");
3648 }
3649
485bf569
SN
3650 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3651lancer_fw_exit:
3652 return status;
3653}
3654
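/* The UFI type is taken from the first character of the build string in
 * the file header ('2', '3' or '4') and must match the chip family; BE3-R
 * parts (asic_type_rev 0x10) additionally require a TYPE3R image.
 */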
ca34fe38
SP
3655#define UFI_TYPE2 2
3656#define UFI_TYPE3 3
0ad3157e 3657#define UFI_TYPE3R 10
ca34fe38
SP
3658#define UFI_TYPE4 4
3659static int be_get_ufi_type(struct be_adapter *adapter,
0ad3157e 3660 struct flash_file_hdr_g3 *fhdr)
773a2d7c
PR
3661{
3662 if (fhdr == NULL)
3663 goto be_get_ufi_exit;
3664
ca34fe38
SP
3665 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3666 return UFI_TYPE4;
0ad3157e
VV
3667 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3668 if (fhdr->asic_type_rev == 0x10)
3669 return UFI_TYPE3R;
3670 else
3671 return UFI_TYPE3;
3672 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
ca34fe38 3673 return UFI_TYPE2;
773a2d7c
PR
3674
3675be_get_ufi_exit:
3676 dev_err(&adapter->pdev->dev,
3677 "UFI and Interface are not compatible for flashing\n");
3678 return -1;
3679}
3680
485bf569
SN
3681static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3682{
485bf569
SN
3683 struct flash_file_hdr_g3 *fhdr3;
3684 struct image_hdr *img_hdr_ptr = NULL;
3685 struct be_dma_mem flash_cmd;
3686 const u8 *p;
773a2d7c 3687 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
84517482 3688
be716446 3689 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2b7bcebf
IV
3690 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3691 &flash_cmd.dma, GFP_KERNEL);
84517482
AK
3692 if (!flash_cmd.va) {
3693 status = -ENOMEM;
485bf569 3694 goto be_fw_exit;
84517482
AK
3695 }
3696
773a2d7c 3697 p = fw->data;
0ad3157e 3698 fhdr3 = (struct flash_file_hdr_g3 *)p;
773a2d7c 3699
0ad3157e 3700 ufi_type = be_get_ufi_type(adapter, fhdr3);
773a2d7c 3701
773a2d7c
PR
3702 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3703 for (i = 0; i < num_imgs; i++) {
3704 img_hdr_ptr = (struct image_hdr *)(fw->data +
3705 (sizeof(struct flash_file_hdr_g3) +
3706 i * sizeof(struct image_hdr)));
3707 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
0ad3157e
VV
3708 switch (ufi_type) {
3709 case UFI_TYPE4:
773a2d7c
PR
3710 status = be_flash_skyhawk(adapter, fw,
3711 &flash_cmd, num_imgs);
0ad3157e
VV
3712 break;
3713 case UFI_TYPE3R:
ca34fe38
SP
3714 status = be_flash_BEx(adapter, fw, &flash_cmd,
3715 num_imgs);
0ad3157e
VV
3716 break;
3717 case UFI_TYPE3:
3718 /* Do not flash this ufi on BE3-R cards */
3719 if (adapter->asic_rev < 0x10) {
3720 status = be_flash_BEx(adapter, fw,
3721 &flash_cmd,
3722 num_imgs);
3723 } else {
3724 status = -1;
3725 dev_err(&adapter->pdev->dev,
3726 "Can't load BE3 UFI on BE3R\n");
3727 }
3728 }
3f0d4560 3729 }
773a2d7c
PR
3730 }
3731
ca34fe38
SP
3732 if (ufi_type == UFI_TYPE2)
3733 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
773a2d7c 3734 else if (ufi_type == -1)
3f0d4560 3735 status = -1;
84517482 3736
2b7bcebf
IV
3737 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3738 flash_cmd.dma);
84517482
AK
3739 if (status) {
3740 dev_err(&adapter->pdev->dev, "Firmware load error\n");
485bf569 3741 goto be_fw_exit;
84517482
AK
3742 }
3743
af901ca1 3744 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
84517482 3745
485bf569
SN
3746be_fw_exit:
3747 return status;
3748}
3749
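/* Firmware flashing entry point (reached via ethtool): request the file
 * from userspace, dispatch to the Lancer or BEx/Skyhawk path, and refresh
 * the cached FW version strings once flashing succeeds.
 */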
3750int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3751{
3752 const struct firmware *fw;
3753 int status;
3754
3755 if (!netif_running(adapter->netdev)) {
3756 dev_err(&adapter->pdev->dev,
3757 "Firmware load not allowed (interface is down)\n");
3758 return -ENETDOWN;
3759 }
3760
3761 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3762 if (status)
3763 goto fw_exit;
3764
3765 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3766
3767 if (lancer_chip(adapter))
3768 status = lancer_fw_download(adapter, fw);
3769 else
3770 status = be_fw_download(adapter, fw);
3771
eeb65ced
SK
3772 if (!status)
3773 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3774 adapter->fw_on_flash);
3775
84517482
AK
3776fw_exit:
3777 release_firmware(fw);
3778 return status;
3779}
3780
e5686ad8 3781static const struct net_device_ops be_netdev_ops = {
6b7c5b94
SP
3782 .ndo_open = be_open,
3783 .ndo_stop = be_close,
3784 .ndo_start_xmit = be_xmit,
a54769f5 3785 .ndo_set_rx_mode = be_set_rx_mode,
6b7c5b94
SP
3786 .ndo_set_mac_address = be_mac_addr_set,
3787 .ndo_change_mtu = be_change_mtu,
ab1594e9 3788 .ndo_get_stats64 = be_get_stats64,
6b7c5b94 3789 .ndo_validate_addr = eth_validate_addr,
6b7c5b94
SP
3790 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3791 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
64600ea5 3792 .ndo_set_vf_mac = be_set_vf_mac,
1da87b7f 3793 .ndo_set_vf_vlan = be_set_vf_vlan,
e1d18735 3794 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
66268739
IV
3795 .ndo_get_vf_config = be_get_vf_config,
3796#ifdef CONFIG_NET_POLL_CONTROLLER
3797 .ndo_poll_controller = be_netpoll,
3798#endif
6b7c5b94
SP
3799};
3800
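/* Advertise offloads (checksums, TSO, VLAN tag insertion/stripping/
 * filtering, and RX hashing on multi-queue setups) and attach one NAPI
 * context per event queue.
 */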
3801static void be_netdev_init(struct net_device *netdev)
3802{
3803 struct be_adapter *adapter = netdev_priv(netdev);
10ef9ab4 3804 struct be_eq_obj *eqo;
3abcdeda 3805 int i;
6b7c5b94 3806
6332c8d3 3807 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
8b8ddc68 3808 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
f646968f 3809 NETIF_F_HW_VLAN_CTAG_TX;
8b8ddc68
MM
3810 if (be_multi_rxq(adapter))
3811 netdev->hw_features |= NETIF_F_RXHASH;
6332c8d3
MM
3812
3813 netdev->features |= netdev->hw_features |
f646968f 3814 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4b972914 3815
eb8a50d9 3816 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
79032644 3817 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
51c59870 3818
fbc13f01
AK
3819 netdev->priv_flags |= IFF_UNICAST_FLT;
3820
6b7c5b94
SP
3821 netdev->flags |= IFF_MULTICAST;
3822
b7e5887e 3823 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
c190e3c8 3824
10ef9ab4 3825 netdev->netdev_ops = &be_netdev_ops;
6b7c5b94
SP
3826
3827 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3828
10ef9ab4
SP
3829 for_all_evt_queues(adapter, eqo, i)
3830 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
6b7c5b94
SP
3831}
3832
3833static void be_unmap_pci_bars(struct be_adapter *adapter)
3834{
c5b3ad4c
SP
3835 if (adapter->csr)
3836 pci_iounmap(adapter->pdev, adapter->csr);
8788fdc2 3837 if (adapter->db)
ce66f781 3838 pci_iounmap(adapter->pdev, adapter->db);
045508a8
PP
3839}
3840
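/* The doorbell BAR depends on the function type: BAR 0 on Lancer and on
 * VFs, BAR 4 on the PF of BE2/BE3/Skyhawk.
 */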
ce66f781
SP
3841static int db_bar(struct be_adapter *adapter)
3842{
3843 if (lancer_chip(adapter) || !be_physfn(adapter))
3844 return 0;
3845 else
3846 return 4;
3847}
3848
3849static int be_roce_map_pci_bars(struct be_adapter *adapter)
045508a8 3850{
dbf0f2a7 3851 if (skyhawk_chip(adapter)) {
ce66f781
SP
3852 adapter->roce_db.size = 4096;
3853 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3854 db_bar(adapter));
3855 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3856 db_bar(adapter));
3857 }
045508a8 3858 return 0;
6b7c5b94
SP
3859}
3860
3861static int be_map_pci_bars(struct be_adapter *adapter)
3862{
3863 u8 __iomem *addr;
ce66f781 3864 u32 sli_intf;
6b7c5b94 3865
ce66f781
SP
3866 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3867 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3868 SLI_INTF_IF_TYPE_SHIFT;
fe6d2a38 3869
c5b3ad4c
SP
3870 if (BEx_chip(adapter) && be_physfn(adapter)) {
3871 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3872 if (adapter->csr == NULL)
3873 return -ENOMEM;
3874 }
3875
ce66f781 3876 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
6b7c5b94
SP
3877 if (addr == NULL)
3878 goto pci_map_err;
ba343c77 3879 adapter->db = addr;
ce66f781
SP
3880
3881 be_roce_map_pci_bars(adapter);
6b7c5b94 3882 return 0;
ce66f781 3883
6b7c5b94
SP
3884pci_map_err:
3885 be_unmap_pci_bars(adapter);
3886 return -ENOMEM;
3887}
3888
6b7c5b94
SP
3889static void be_ctrl_cleanup(struct be_adapter *adapter)
3890{
8788fdc2 3891 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
6b7c5b94
SP
3892
3893 be_unmap_pci_bars(adapter);
3894
3895 if (mem->va)
2b7bcebf
IV
3896 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3897 mem->dma);
e7b909a6 3898
5b8821b7 3899 mem = &adapter->rx_filter;
e7b909a6 3900 if (mem->va)
2b7bcebf
IV
3901 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3902 mem->dma);
6b7c5b94
SP
3903}
3904
6b7c5b94
SP
3905static int be_ctrl_init(struct be_adapter *adapter)
3906{
8788fdc2
SP
3907 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3908 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5b8821b7 3909 struct be_dma_mem *rx_filter = &adapter->rx_filter;
ce66f781 3910 u32 sli_intf;
6b7c5b94 3911 int status;
6b7c5b94 3912
ce66f781
SP
3913 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3914 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3915 SLI_INTF_FAMILY_SHIFT;
3916 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3917
6b7c5b94
SP
3918 status = be_map_pci_bars(adapter);
3919 if (status)
e7b909a6 3920 goto done;
6b7c5b94
SP
3921
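 /* The bootstrap mailbox must be 16-byte aligned: over-allocate by 16
 * bytes and align both the CPU and DMA addresses via PTR_ALIGN below.
 */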
3922 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2b7bcebf
IV
3923 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3924 mbox_mem_alloc->size,
3925 &mbox_mem_alloc->dma,
3926 GFP_KERNEL);
6b7c5b94 3927 if (!mbox_mem_alloc->va) {
e7b909a6
SP
3928 status = -ENOMEM;
3929 goto unmap_pci_bars;
6b7c5b94
SP
3930 }
3931 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3932 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3933 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3934 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
e7b909a6 3935
5b8821b7
SP
3936 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3937 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
1f9061d2
JP
3938 &rx_filter->dma,
3939 GFP_KERNEL | __GFP_ZERO);
5b8821b7 3940 if (rx_filter->va == NULL) {
e7b909a6
SP
3941 status = -ENOMEM;
3942 goto free_mbox;
3943 }
1f9061d2 3944
2984961c 3945 mutex_init(&adapter->mbox_lock);
8788fdc2
SP
3946 spin_lock_init(&adapter->mcc_lock);
3947 spin_lock_init(&adapter->mcc_cq_lock);
a8f447bd 3948
dd131e76 3949 init_completion(&adapter->flash_compl);
cf588477 3950 pci_save_state(adapter->pdev);
6b7c5b94 3951 return 0;
e7b909a6
SP
3952
3953free_mbox:
2b7bcebf
IV
3954 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3955 mbox_mem_alloc->va, mbox_mem_alloc->dma);
e7b909a6
SP
3956
3957unmap_pci_bars:
3958 be_unmap_pci_bars(adapter);
3959
3960done:
3961 return status;
6b7c5b94
SP
3962}
3963
3964static void be_stats_cleanup(struct be_adapter *adapter)
3965{
3abcdeda 3966 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94
SP
3967
3968 if (cmd->va)
2b7bcebf
IV
3969 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3970 cmd->va, cmd->dma);
6b7c5b94
SP
3971}
3972
3973static int be_stats_init(struct be_adapter *adapter)
3974{
3abcdeda 3975 struct be_dma_mem *cmd = &adapter->stats_cmd;
6b7c5b94 3976
ca34fe38
SP
3977 if (lancer_chip(adapter))
3978 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3979 else if (BE2_chip(adapter))
89a88ab8 3980 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
ca34fe38
SP
3981 else
3982 /* BE3 and Skyhawk */
3983 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3984
2b7bcebf 3985 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
1f9061d2 3986 GFP_KERNEL | __GFP_ZERO);
6b7c5b94
SP
3987 if (cmd->va == NULL)
3988 return -1;
3989 return 0;
3990}
3991
3bc6b06c 3992static void be_remove(struct pci_dev *pdev)
6b7c5b94
SP
3993{
3994 struct be_adapter *adapter = pci_get_drvdata(pdev);
8d56ff11 3995
6b7c5b94
SP
3996 if (!adapter)
3997 return;
3998
045508a8 3999 be_roce_dev_remove(adapter);
8cef7a78 4000 be_intr_set(adapter, false);
045508a8 4001
f67ef7ba
PR
4002 cancel_delayed_work_sync(&adapter->func_recovery_work);
4003
6b7c5b94
SP
4004 unregister_netdev(adapter->netdev);
4005
5fb379ee
SP
4006 be_clear(adapter);
4007
bf99e50d
PR
4008 /* tell fw we're done with firing cmds */
4009 be_cmd_fw_clean(adapter);
4010
6b7c5b94
SP
4011 be_stats_cleanup(adapter);
4012
4013 be_ctrl_cleanup(adapter);
4014
d6b6d987
SP
4015 pci_disable_pcie_error_reporting(pdev);
4016
6b7c5b94
SP
4017 pci_set_drvdata(pdev, NULL);
4018 pci_release_regions(pdev);
4019 pci_disable_device(pdev);
4020
4021 free_netdev(adapter->netdev);
4022}
4023
4762f6ce
AK
4024bool be_is_wol_supported(struct be_adapter *adapter)
4025{
4026 return (adapter->wol_cap & BE_WOL_CAP) &&
4027 !be_is_wol_excluded(adapter);
4028}
4029
941a77d5
SK
4030u32 be_get_fw_log_level(struct be_adapter *adapter)
4031{
4032 struct be_dma_mem extfat_cmd;
4033 struct be_fat_conf_params *cfgs;
4034 int status;
4035 u32 level = 0;
4036 int j;
4037
f25b119c
PR
4038 if (lancer_chip(adapter))
4039 return 0;
4040
941a77d5
SK
4041 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4042 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4043 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4044 &extfat_cmd.dma);
4045
4046 if (!extfat_cmd.va) {
4047 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4048 __func__);
4049 goto err;
4050 }
4051
4052 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4053 if (!status) {
4054 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4055 sizeof(struct be_cmd_resp_hdr));
ac46a462 4056 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
941a77d5
SK
4057 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4058 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4059 }
4060 }
4061 pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4062 extfat_cmd.dma);
4063err:
4064 return level;
4065}
abb93951 4066
39f1d94d 4067static int be_get_initial_config(struct be_adapter *adapter)
6b7c5b94 4068{
6b7c5b94 4069 int status;
941a77d5 4070 u32 level;
6b7c5b94 4071
9e1453c5
AK
4072 status = be_cmd_get_cntl_attributes(adapter);
4073 if (status)
4074 return status;
4075
4762f6ce
AK
4076 status = be_cmd_get_acpi_wol_cap(adapter);
4077 if (status) {
4078 /* in case of a failure to get wol capabilities,
4079 * check the exclusion list to determine WOL capability */
4080 if (!be_is_wol_excluded(adapter))
4081 adapter->wol_cap |= BE_WOL_CAP;
4082 }
4083
4084 if (be_is_wol_supported(adapter))
4085 adapter->wol = true;
4086
7aeb2156
PR
4087 /* Must be a power of 2 or else MODULO will BUG_ON */
4088 adapter->be_get_temp_freq = 64;
4089
941a77d5
SK
4090 level = be_get_fw_log_level(adapter);
4091 adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4092
2243e2e9 4093 return 0;
6b7c5b94
SP
4094}
4095
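/* Lancer-only error recovery: once firmware reports ready again, tear the
 * function down with be_clear(), forget the recorded error state and redo
 * the full be_setup()/be_open() bring-up.
 */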
f67ef7ba 4096static int lancer_recover_func(struct be_adapter *adapter)
d8110f62 4097{
01e5b2c4 4098 struct device *dev = &adapter->pdev->dev;
d8110f62 4099 int status;
d8110f62 4100
f67ef7ba
PR
4101 status = lancer_test_and_set_rdy_state(adapter);
4102 if (status)
4103 goto err;
d8110f62 4104
f67ef7ba
PR
4105 if (netif_running(adapter->netdev))
4106 be_close(adapter->netdev);
d8110f62 4107
f67ef7ba
PR
4108 be_clear(adapter);
4109
01e5b2c4 4110 be_clear_all_error(adapter);
f67ef7ba
PR
4111
4112 status = be_setup(adapter);
4113 if (status)
4114 goto err;
d8110f62 4115
f67ef7ba
PR
4116 if (netif_running(adapter->netdev)) {
4117 status = be_open(adapter->netdev);
d8110f62
PR
4118 if (status)
4119 goto err;
f67ef7ba 4120 }
d8110f62 4121
01e5b2c4 4122 dev_err(dev, "Error recovery successful\n");
f67ef7ba
PR
4123 return 0;
4124err:
01e5b2c4
SK
4125 if (status == -EAGAIN)
4126 dev_err(dev, "Waiting for resource provisioning\n");
4127 else
4128 dev_err(dev, "Error recovery failed\n");
d8110f62 4129
f67ef7ba
PR
4130 return status;
4131}
4132
4133static void be_func_recovery_task(struct work_struct *work)
4134{
4135 struct be_adapter *adapter =
4136 container_of(work, struct be_adapter, func_recovery_work.work);
01e5b2c4 4137 int status = 0;
d8110f62 4138
f67ef7ba 4139 be_detect_error(adapter);
d8110f62 4140
f67ef7ba 4141 if (adapter->hw_error && lancer_chip(adapter)) {
f67ef7ba
PR
4143 rtnl_lock();
4144 netif_device_detach(adapter->netdev);
4145 rtnl_unlock();
d8110f62 4146
f67ef7ba 4147 status = lancer_recover_func(adapter);
f67ef7ba
PR
4148 if (!status)
4149 netif_device_attach(adapter->netdev);
d8110f62 4150 }
f67ef7ba 4151
01e5b2c4
SK
4152 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4153 * no need to attempt further recovery.
4154 */
4155 if (!status || status == -EAGAIN)
4156 schedule_delayed_work(&adapter->func_recovery_work,
4157 msecs_to_jiffies(1000));
d8110f62
PR
4158}
4159
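/* Periodic (1 sec) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh stats, read the die temperature every
 * be_get_temp_freq iterations, repost buffers on starved RX queues, and
 * adapt the EQ interrupt delays.
 */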
4160static void be_worker(struct work_struct *work)
4161{
4162 struct be_adapter *adapter =
4163 container_of(work, struct be_adapter, work.work);
4164 struct be_rx_obj *rxo;
10ef9ab4 4165 struct be_eq_obj *eqo;
d8110f62
PR
4166 int i;
4167
d8110f62
PR
4168 /* when interrupts are not yet enabled, just reap any pending
4169 * mcc completions */
4170 if (!netif_running(adapter->netdev)) {
072a9c48 4171 local_bh_disable();
10ef9ab4 4172 be_process_mcc(adapter);
072a9c48 4173 local_bh_enable();
d8110f62
PR
4174 goto reschedule;
4175 }
4176
4177 if (!adapter->stats_cmd_sent) {
4178 if (lancer_chip(adapter))
4179 lancer_cmd_get_pport_stats(adapter,
4180 &adapter->stats_cmd);
4181 else
4182 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4183 }
4184
7aeb2156
PR
4185 if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4186 be_cmd_get_die_temperature(adapter);
4187
d8110f62 4188 for_all_rx_queues(adapter, rxo, i) {
d8110f62
PR
4189 if (rxo->rx_post_starved) {
4190 rxo->rx_post_starved = false;
4191 be_post_rx_frags(rxo, GFP_KERNEL);
4192 }
4193 }
4194
10ef9ab4
SP
4195 for_all_evt_queues(adapter, eqo, i)
4196 be_eqd_update(adapter, eqo);
4197
d8110f62
PR
4198reschedule:
4199 adapter->work_counter++;
4200 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4201}
4202
39f1d94d
SP
4203static bool be_reset_required(struct be_adapter *adapter)
4204{
d79c0a20 4205 return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
39f1d94d
SP
4206}
4207
d379142b
SP
4208static char *mc_name(struct be_adapter *adapter)
4209{
4210 if (adapter->function_mode & FLEX10_MODE)
4211 return "FLEX10";
4212 else if (adapter->function_mode & VNIC_MODE)
4213 return "vNIC";
4214 else if (adapter->function_mode & UMC_ENABLED)
4215 return "UMC";
4216 else
4217 return "";
4218}
4219
4220static inline char *func_name(struct be_adapter *adapter)
4221{
4222 return be_physfn(adapter) ? "PF" : "VF";
4223}
4224
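/* PCI probe: enable the device, set the DMA mask (64-bit with a 32-bit
 * fallback), map BARs and set up the bootstrap mailbox, wait for firmware
 * readiness, reset the function if needed, then be_setup() and register
 * the netdev. Failures unwind in reverse order through the labels below.
 */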
1dd06ae8 4225static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
6b7c5b94
SP
4226{
4227 int status = 0;
4228 struct be_adapter *adapter;
4229 struct net_device *netdev;
b4e32a71 4230 char port_name;
6b7c5b94
SP
4231
4232 status = pci_enable_device(pdev);
4233 if (status)
4234 goto do_none;
4235
4236 status = pci_request_regions(pdev, DRV_NAME);
4237 if (status)
4238 goto disable_dev;
4239 pci_set_master(pdev);
4240
7f640062 4241 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
6b7c5b94
SP
4242 if (netdev == NULL) {
4243 status = -ENOMEM;
4244 goto rel_reg;
4245 }
4246 adapter = netdev_priv(netdev);
4247 adapter->pdev = pdev;
4248 pci_set_drvdata(pdev, adapter);
4249 adapter->netdev = netdev;
2243e2e9 4250 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 4251
2b7bcebf 4252 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94 4253 if (!status) {
2bd92cd2
CH
4254 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4255 if (status < 0) {
4256 dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4257 goto free_netdev;
4258 }
6b7c5b94
SP
4259 netdev->features |= NETIF_F_HIGHDMA;
4260 } else {
2b7bcebf 4261 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
0c5fed09
SK
4262 if (!status)
4263 status = dma_set_coherent_mask(&pdev->dev,
4264 DMA_BIT_MASK(32));
6b7c5b94
SP
4265 if (status) {
4266 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4267 goto free_netdev;
4268 }
4269 }
4270
d6b6d987
SP
4271 status = pci_enable_pcie_error_reporting(pdev);
4272 if (status)
4273 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4274
6b7c5b94
SP
4275 status = be_ctrl_init(adapter);
4276 if (status)
39f1d94d 4277 goto free_netdev;
6b7c5b94 4278
2243e2e9 4279 /* sync up with fw's ready state */
ba343c77 4280 if (be_physfn(adapter)) {
bf99e50d 4281 status = be_fw_wait_ready(adapter);
ba343c77
SB
4282 if (status)
4283 goto ctrl_clean;
ba343c77 4284 }
6b7c5b94 4285
39f1d94d
SP
4286 if (be_reset_required(adapter)) {
4287 status = be_cmd_reset_function(adapter);
4288 if (status)
4289 goto ctrl_clean;
556ae191 4290
2d177be8
KA
4291 /* Wait for interrupts to quiesce after an FLR */
4292 msleep(100);
4293 }
8cef7a78
SK
4294
4295 /* Allow interrupts for other ULPs running on NIC function */
4296 be_intr_set(adapter, true);
10ef9ab4 4297
2d177be8
KA
4298 /* tell fw we're ready to fire cmds */
4299 status = be_cmd_fw_init(adapter);
4300 if (status)
4301 goto ctrl_clean;
4302
2243e2e9
SP
4303 status = be_stats_init(adapter);
4304 if (status)
4305 goto ctrl_clean;
4306
39f1d94d 4307 status = be_get_initial_config(adapter);
6b7c5b94
SP
4308 if (status)
4309 goto stats_clean;
6b7c5b94
SP
4310
4311 INIT_DELAYED_WORK(&adapter->work, be_worker);
f67ef7ba 4312 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
a54769f5 4313 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 4314
5fb379ee
SP
4315 status = be_setup(adapter);
4316 if (status)
55f5c3c5 4317 goto stats_clean;
2243e2e9 4318
3abcdeda 4319 be_netdev_init(netdev);
6b7c5b94
SP
4320 status = register_netdev(netdev);
4321 if (status != 0)
5fb379ee 4322 goto unsetup;
6b7c5b94 4323
045508a8
PP
4324 be_roce_dev_add(adapter);
4325
f67ef7ba
PR
4326 schedule_delayed_work(&adapter->func_recovery_work,
4327 msecs_to_jiffies(1000));
b4e32a71
PR
4328
4329 be_cmd_query_port_name(adapter, &port_name);
4330
d379142b
SP
4331 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4332 func_name(adapter), mc_name(adapter), port_name);
34b1ef04 4333
6b7c5b94
SP
4334 return 0;
4335
5fb379ee
SP
4336unsetup:
4337 be_clear(adapter);
6b7c5b94
SP
4338stats_clean:
4339 be_stats_cleanup(adapter);
4340ctrl_clean:
4341 be_ctrl_cleanup(adapter);
f9449ab7 4342free_netdev:
fe6d2a38 4343 free_netdev(netdev);
8d56ff11 4344 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
4345rel_reg:
4346 pci_release_regions(pdev);
4347disable_dev:
4348 pci_disable_device(pdev);
4349do_none:
c4ca2374 4350 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
4351 return status;
4352}
4353
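/* Power management: suspend arms wake-on-LAN when enabled and then tears
 * the function down with be_clear(); resume redoes firmware init and a
 * full be_setup(), recreating all rings and filters from scratch.
 */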
4354static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4355{
4356 struct be_adapter *adapter = pci_get_drvdata(pdev);
4357 struct net_device *netdev = adapter->netdev;
4358
71d8d1b5
AK
4359 if (adapter->wol)
4360 be_setup_wol(adapter, true);
4361
f67ef7ba
PR
4362 cancel_delayed_work_sync(&adapter->func_recovery_work);
4363
6b7c5b94
SP
4364 netif_device_detach(netdev);
4365 if (netif_running(netdev)) {
4366 rtnl_lock();
4367 be_close(netdev);
4368 rtnl_unlock();
4369 }
9b0365f1 4370 be_clear(adapter);
6b7c5b94
SP
4371
4372 pci_save_state(pdev);
4373 pci_disable_device(pdev);
4374 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4375 return 0;
4376}
4377
4378static int be_resume(struct pci_dev *pdev)
4379{
4380 int status = 0;
4381 struct be_adapter *adapter = pci_get_drvdata(pdev);
4382 struct net_device *netdev = adapter->netdev;
4383
4384 netif_device_detach(netdev);
4385
4386 status = pci_enable_device(pdev);
4387 if (status)
4388 return status;
4389
4390 pci_set_power_state(pdev, PCI_D0);
4391 pci_restore_state(pdev);
4392
2243e2e9
SP
4393 /* tell fw we're ready to fire cmds */
4394 status = be_cmd_fw_init(adapter);
4395 if (status)
4396 return status;
4397
9b0365f1 4398 be_setup(adapter);
6b7c5b94
SP
4399 if (netif_running(netdev)) {
4400 rtnl_lock();
4401 be_open(netdev);
4402 rtnl_unlock();
4403 }
f67ef7ba
PR
4404
4405 schedule_delayed_work(&adapter->func_recovery_work,
4406 msecs_to_jiffies(1000));
6b7c5b94 4407 netif_device_attach(netdev);
71d8d1b5
AK
4408
4409 if (adapter->wol)
4410 be_setup_wol(adapter, false);
a4ca055f 4411
6b7c5b94
SP
4412 return 0;
4413}
4414
82456b03
SP
4415/*
4416 * An FLR will stop BE from DMAing any data.
4417 */
4418static void be_shutdown(struct pci_dev *pdev)
4419{
4420 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 4421
2d5d4154
AK
4422 if (!adapter)
4423 return;
82456b03 4424
0f4a6828 4425 cancel_delayed_work_sync(&adapter->work);
f67ef7ba 4426 cancel_delayed_work_sync(&adapter->func_recovery_work);
a4ca055f 4427
2d5d4154 4428 netif_device_detach(adapter->netdev);
82456b03 4429
57841869
AK
4430 be_cmd_reset_function(adapter);
4431
82456b03 4432 pci_disable_device(pdev);
82456b03
SP
4433}
4434
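/* PCI EEH (AER) hooks: error_detected tears the function down, slot_reset
 * waits for firmware readiness after the slot is reset, and resume re-runs
 * function reset plus be_setup() to restore traffic.
 */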
cf588477
SP
4435static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436 pci_channel_state_t state)
4437{
4438 struct be_adapter *adapter = pci_get_drvdata(pdev);
4439 struct net_device *netdev = adapter->netdev;
4440
4441 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4442
01e5b2c4
SK
4443 if (!adapter->eeh_error) {
4444 adapter->eeh_error = true;
cf588477 4445
01e5b2c4 4446 cancel_delayed_work_sync(&adapter->func_recovery_work);
cf588477 4447
cf588477 4448 rtnl_lock();
01e5b2c4
SK
4449 netif_device_detach(netdev);
4450 if (netif_running(netdev))
4451 be_close(netdev);
cf588477 4452 rtnl_unlock();
01e5b2c4
SK
4453
4454 be_clear(adapter);
cf588477 4455 }
cf588477
SP
4456
4457 if (state == pci_channel_io_perm_failure)
4458 return PCI_ERS_RESULT_DISCONNECT;
4459
4460 pci_disable_device(pdev);
4461
eeb7fc7b
SK
4462 /* The error could cause the FW to trigger a flash debug dump.
4463 * Resetting the card while flash dump is in progress
c8a54163
PR
4464 * can cause it not to recover; wait for it to finish.
4465 * Wait only for first function as it is needed only once per
4466 * adapter.
eeb7fc7b 4467 */
c8a54163
PR
4468 if (pdev->devfn == 0)
4469 ssleep(30);
4470
cf588477
SP
4471 return PCI_ERS_RESULT_NEED_RESET;
4472}
4473
4474static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4475{
4476 struct be_adapter *adapter = pci_get_drvdata(pdev);
4477 int status;
4478
4479 dev_info(&adapter->pdev->dev, "EEH reset\n");
cf588477
SP
4480
4481 status = pci_enable_device(pdev);
4482 if (status)
4483 return PCI_ERS_RESULT_DISCONNECT;
4484
4485 pci_set_master(pdev);
4486 pci_set_power_state(pdev, PCI_D0);
4487 pci_restore_state(pdev);
4488
4489 /* Check if card is ok and fw is ready */
c5b3ad4c
SP
4490 dev_info(&adapter->pdev->dev,
4491 "Waiting for FW to be ready after EEH reset\n");
bf99e50d 4492 status = be_fw_wait_ready(adapter);
cf588477
SP
4493 if (status)
4494 return PCI_ERS_RESULT_DISCONNECT;
4495
d6b6d987 4496 pci_cleanup_aer_uncorrect_error_status(pdev);
01e5b2c4 4497 be_clear_all_error(adapter);
cf588477
SP
4498 return PCI_ERS_RESULT_RECOVERED;
4499}
4500
4501static void be_eeh_resume(struct pci_dev *pdev)
4502{
4503 int status = 0;
4504 struct be_adapter *adapter = pci_get_drvdata(pdev);
4505 struct net_device *netdev = adapter->netdev;
4506
4507 dev_info(&adapter->pdev->dev, "EEH resume\n");
4508
4509 pci_save_state(pdev);
4510
2d177be8 4511 status = be_cmd_reset_function(adapter);
cf588477
SP
4512 if (status)
4513 goto err;
4514
2d177be8
KA
4515 /* tell fw we're ready to fire cmds */
4516 status = be_cmd_fw_init(adapter);
bf99e50d
PR
4517 if (status)
4518 goto err;
4519
cf588477
SP
4520 status = be_setup(adapter);
4521 if (status)
4522 goto err;
4523
4524 if (netif_running(netdev)) {
4525 status = be_open(netdev);
4526 if (status)
4527 goto err;
4528 }
f67ef7ba
PR
4529
4530 schedule_delayed_work(&adapter->func_recovery_work,
4531 msecs_to_jiffies(1000));
cf588477
SP
4532 netif_device_attach(netdev);
4533 return;
4534err:
4535 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
4536}
4537
3646f0e5 4538static const struct pci_error_handlers be_eeh_handlers = {
cf588477
SP
4539 .error_detected = be_eeh_err_detected,
4540 .slot_reset = be_eeh_reset,
4541 .resume = be_eeh_resume,
4542};
4543
6b7c5b94
SP
4544static struct pci_driver be_driver = {
4545 .name = DRV_NAME,
4546 .id_table = be_dev_ids,
4547 .probe = be_probe,
4548 .remove = be_remove,
4549 .suspend = be_suspend,
cf588477 4550 .resume = be_resume,
82456b03 4551 .shutdown = be_shutdown,
cf588477 4552 .err_handler = &be_eeh_handlers
6b7c5b94
SP
4553};
4554
4555static int __init be_init_module(void)
4556{
8e95a202
JP
4557 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4558 rx_frag_size != 2048) {
6b7c5b94
SP
4559 printk(KERN_WARNING DRV_NAME
4560 " : Module param rx_frag_size must be 2048/4096/8192."
4561 " Using 2048\n");
4562 rx_frag_size = 2048;
4563 }
6b7c5b94
SP
4564
4565 return pci_register_driver(&be_driver);
4566}
4567module_init(be_init_module);
4568
4569static void __exit be_exit_module(void)
4570{
4571 pci_unregister_driver(&be_driver);
4572}
4573module_exit(be_exit_module);