/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_error)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
		DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

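/*
 * Illustrative sketch (not driver code, not built): each doorbell write
 * above packs a ring id and a count into one 32-bit MMIO word, so a single
 * write tells the HW both *which* ring and *how many* new entries to
 * consume; the wmb() orders the ring-memory writes before the MMIO write.
 * The mask/shift values below are hypothetical stand-ins for the real
 * definitions in be_hw.h.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_RING_ID_MASK		0x3FF	/* hypothetical */
#define EX_NUM_POSTED_SHIFT	24	/* hypothetical */

static uint32_t ex_pack_doorbell(uint16_t qid, uint16_t posted)
{
	return (qid & EX_RING_ID_MASK) |
	       ((uint32_t)posted << EX_NUM_POSTED_SHIFT);
}

int main(void)
{
	/* Posting 8 buffers to ring 5 -> one 32-bit doorbell value */
	printf("0x%08x\n", ex_pack_doorbell(5, 8));	/* 0x08000005 */
	return 0;
}
#endif
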
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For a BE VF, the MAC address is already activated by the PF.
	 * Hence the only operation left is updating netdev->dev_addr.
	 * Update it if the user is passing the same MAC which was used
	 * while configuring the VF MAC from the PF (hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_mismatch_drops =
		port_stats->rx_address_mismatch_drops +
		port_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_mismatch_drops =
		pport_stats->rx_address_mismatch_drops +
		pport_stats->rx_vlan_mismatch_drops;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x) (x & 0xFFFF)
#define hi(x) (x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

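/*
 * Illustrative sketch (not built): the HW counter handled above is only
 * 16 bits wide, so the driver keeps the running total in the high half of
 * a u32 and detects a wrap whenever the new 16-bit reading is smaller than
 * the previous one. Standalone model of that arithmetic:
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static void ex_accumulate_16bit(uint32_t *acc, uint16_t val)
{
	bool wrapped = val < (*acc & 0xFFFF);
	uint32_t newacc = (*acc & 0xFFFF0000) + val;

	if (wrapped)
		newacc += 65536;	/* one full 16-bit rollover */
	*acc = newacc;
}

int main(void)
{
	uint32_t acc = 0;

	ex_accumulate_16bit(&acc, 65000);	/* acc = 65000 */
	ex_accumulate_16bit(&acc, 100);		/* wrapped: 65536 + 100 */
	assert(acc == 65636);
	return 0;
}
#endif
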
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	if (lancer_chip(adapter))
		goto done;

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
done:
	return;
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

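/*
 * Illustrative sketch (not built): the fetch/retry loops above use a
 * sequence counter so 64-bit counter pairs can be sampled consistently on
 * 32-bit hosts without blocking the writer. A simplified model of the
 * reader's contract follows (the kernel's u64_stats_sync adds the needed
 * compiler/memory barriers and compiles away on 64-bit configurations):
 */
#if 0
#include <stdint.h>

struct ex_stats {
	unsigned int seq;	/* writer bumps seq before and after update */
	uint64_t pkts;
	uint64_t bytes;
};

static void ex_read_stats(const struct ex_stats *s,
			  uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*pkts = s->pkts;
		*bytes = s->bytes;
		/* odd or changed seq => writer was active; retry the read */
	} while ((start & 1) || start != s->seq);
}
#endif
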
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

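/*
 * Illustrative sketch (not built): worked example of the WRB count above.
 * An skb with linear data and 2 page frags needs 1 + 2 data WRBs plus the
 * header WRB = 4 (even, no dummy needed). With 3 frags the total would be
 * 5, so on BE chips a dummy WRB is added to keep the entry count even.
 */
#if 0
#include <assert.h>
#include <stdbool.h>

static int ex_wrb_cnt(bool has_linear, int nr_frags, bool lancer, bool *dummy)
{
	int cnt = (has_linear ? 1 : 0) + nr_frags + 1;	/* +1 for hdr wrb */

	*dummy = !lancer && (cnt & 1);
	return cnt + (*dummy ? 1 : 0);
}

int main(void)
{
	bool dummy;

	assert(ex_wrb_cnt(true, 2, false, &dummy) == 4 && !dummy);
	assert(ex_wrb_cnt(true, 3, false, &dummy) == 6 && dummy);
	return 0;
}
#endif
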
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio;

	return vlan_tag;
}

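/*
 * Illustrative sketch (not built): a VLAN TCI is 16 bits -- PCP (priority)
 * in bits 15-13, DEI in bit 12, VID in bits 11-0. The helper above only
 * rewrites the PCP field when the OS-supplied priority is absent from the
 * adapter's allowed bitmap. Standalone model with the standard mask/shift
 * values (the replacement priority bits are assumed pre-shifted, matching
 * how recommended_prio is OR'd in above):
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_VLAN_PRIO_MASK	0xE000
#define EX_VLAN_PRIO_SHIFT	13

static uint16_t ex_remap_prio(uint16_t tci, uint8_t prio_bmap,
			      uint16_t recommended_prio_bits)
{
	uint8_t prio = (tci & EX_VLAN_PRIO_MASK) >> EX_VLAN_PRIO_SHIFT;

	if (!(prio_bmap & (1 << prio)))
		tci = (tci & ~EX_VLAN_PRIO_MASK) | recommended_prio_bits;
	return tci;
}

int main(void)
{
	/* VID 100, priority 5; bitmap allows only prio 0 -> remap to 0 */
	uint16_t tci = (5 << EX_VLAN_PRIO_SHIFT) | 100;

	assert(ex_remap_prio(tci, 0x01, 0) == 100);
	return 0;
}
#endif
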
static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			      hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
		    LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
					      udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		__vlan_put_tag(skb, vlan_tag);
		skb->vlan_tci = 0;
	}

	return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			   struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	struct iphdr *ip = NULL;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head, eth_hdr_len;
	bool dummy_wrb, stopped = false;

	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

	/* HW has a bug which considers padding bytes as legal
	 * and modifies the IPv4 hdr's 'tot_len' field
	 */
	if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    be_vlan_tag_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb);
		if (unlikely(!skb))
			goto tx_drop;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
		    txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
tx_drop:
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			 "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU,
			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
			vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    vids, num, 1, 0);

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
	}

	return status;

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
}

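/*
 * Illustrative sketch (not built): be_vid_config() compresses the sparse
 * per-VID lookup table into a dense list for the FW command (each entry
 * converted with cpu_to_le16() in the real code). The same pattern in
 * isolation, with a tiny 8-entry table:
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t vlan_tag[8] = { 0, 1, 0, 0, 1, 0, 0, 1 };	/* VIDs 1,4,7 */
	uint16_t vids[8];
	unsigned int num = 0, i;

	for (i = 0; i < 8; i++)
		if (vlan_tag[i])
			vids[num++] = i;

	assert(num == 3 && vids[0] == 1 && vids[1] == 4 && vids[2] == 7);
	return 0;
}
#endif
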
static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;
ret:
	return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;
ret:
	return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > adapter->max_mcast_mac) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

	/* Set to MCAST promisc mode if setting MULTICAST address fails */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	}
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;
	bool active_mac = false;
	u32 pmac_id;
	u8 old_mac[ETH_ALEN];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
						  &pmac_id, vf + 1);
		if (!status && active_mac)
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					pmac_id, vf + 1);

		status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
	} else {
		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					 vf_cfg->pmac_id, vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	}

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
			mac, vf);
	else
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = vf_cfg->tx_rate;
	vi->vlan = vf_cfg->vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			  int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095)
		return -EINVAL;

	if (vlan) {
		if (adapter->vf_cfg[vf].vlan_tag != vlan) {
			/* If this is new value, program it. Else skip. */
			adapter->vf_cfg[vf].vlan_tag = vlan;

			status = be_cmd_set_hsw_config(adapter, vlan,
				vf + 1, adapter->vf_cfg[vf].if_handle);
		}
	} else {
		/* Reset Transparent Vlan Tagging. */
		adapter->vf_cfg[vf].vlan_tag = 0;
		vlan = adapter->vf_cfg[vf].def_vid;
		status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
					       adapter->vf_cfg[vf].if_handle);
	}

	if (status)
		dev_info(&adapter->pdev->dev,
			 "VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			     int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (rate < 100 || rate > 10000) {
		dev_err(&adapter->pdev->dev,
			"tx rate must be between 100 and 10000 Mbps\n");
		return -EINVAL;
	}

	if (lancer_chip(adapter))
		status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
	else
		status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev,
			"tx rate %d on VF %d failed\n", rate, vf);
	else
		adapter->vf_cfg[vf].tx_rate = rate;
	return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
	struct pci_dev *dev, *pdev = adapter->pdev;
	int vfs = 0, assigned_vfs = 0, pos;
	u16 offset, stride;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
	while (dev) {
		if (dev->is_virtfn && pci_physfn(dev) == pdev) {
			vfs++;
			if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				assigned_vfs++;
		}
		dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
	}
	return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!eqo->enable_aic) {
		eqd = eqo->eqd;
		goto modify_eqd;
	}

	if (eqo->idx >= adapter->num_rx_qs)
		return;

	stats = rx_stats(&adapter->rx_obj[eqo->idx]);

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = (stats->rx_pps / 110000) << 3;
	eqd = min(eqd, eqo->max_eqd);
	eqd = max(eqd, eqo->min_eqd);
	if (eqd < 10)
		eqd = 0;

modify_eqd:
	if (eqd != eqo->cur_eqd) {
		be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
		eqo->cur_eqd = eqd;
	}
}

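/*
 * Illustrative sketch (not built): the adaptive interrupt coalescing above
 * derives an EQ delay from the measured packet rate, eqd = (pps / 110000)
 * << 3, clamps it to [min_eqd, max_eqd], and forces 0 below 10. E.g. at
 * 1.1 Mpps the raw value is (10 << 3) = 80; at 50 Kpps it is 0.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static unsigned int ex_calc_eqd(uint64_t pps, unsigned int min_eqd,
				unsigned int max_eqd)
{
	unsigned int eqd = (pps / 110000) << 3;

	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	if (eqd < 10)
		eqd = 0;	/* too little traffic to be worth delaying */
	return eqd;
}

int main(void)
{
	assert(ex_calc_eqd(1100000, 0, 120) == 80);
	assert(ex_calc_eqd(50000, 0, 120) == 0);
	return 0;
}
#endif
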
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
	       (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
						u16 frag_idx)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
			page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
			     struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
				page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(napi);
}

static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

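/*
 * Illustrative sketch (not built): the completion ring above is consumed
 * lock-free by polling a 'valid' bit that only the producer (the HW, via
 * DMA) sets. The read barrier keeps the payload reads from being
 * speculated ahead of the valid-bit check; clearing the bit afterwards
 * hands the slot back. Simplified single-entry model:
 */
#if 0
struct ex_compl {
	unsigned int valid;	/* set by producer */
	unsigned int data;
};

/* Returns 0 if nothing new; caller re-polls later. */
static int ex_compl_get(volatile struct ex_compl *c, unsigned int *data)
{
	if (!c->valid)
		return 0;
	/* rmb() goes here in real code: read payload after valid check */
	*data = c->data;
	c->valid = 0;		/* return the slot to the producer */
	return 1;
}
#endif
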
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

1533/*
1534 * Allocate a page, split it to fragments of size rx_frag_size and post as
1535 * receive buffers to BE
1536 */
1829b086 1537static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
6b7c5b94 1538{
3abcdeda 1539 struct be_adapter *adapter = rxo->adapter;
26d92f92 1540 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
3abcdeda 1541 struct be_queue_info *rxq = &rxo->q;
6b7c5b94
SP
1542 struct page *pagep = NULL;
1543 struct be_eth_rx_d *rxd;
1544 u64 page_dmaaddr = 0, frag_dmaaddr;
1545 u32 posted, page_offset = 0;
1546
3abcdeda 1547 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1548 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1549 if (!pagep) {
1829b086 1550 pagep = be_alloc_pages(adapter->big_page_size, gfp);
6b7c5b94 1551 if (unlikely(!pagep)) {
ac124ff9 1552 rx_stats(rxo)->rx_post_fail++;
6b7c5b94
SP
1553 break;
1554 }
2b7bcebf
IV
1555 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1556 0, adapter->big_page_size,
1557 DMA_FROM_DEVICE);
6b7c5b94
SP
1558 page_info->page_offset = 0;
1559 } else {
1560 get_page(pagep);
1561 page_info->page_offset = page_offset + rx_frag_size;
1562 }
1563 page_offset = page_info->page_offset;
1564 page_info->page = pagep;
fac6da5b 1565 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
6b7c5b94
SP
1566 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1567
1568 rxd = queue_head_node(rxq);
1569 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1570 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
6b7c5b94
SP
1571
1572 /* Any space left in the current big page for another frag? */
1573 if ((page_offset + rx_frag_size + rx_frag_size) >
1574 adapter->big_page_size) {
1575 pagep = NULL;
1576 page_info->last_page_user = true;
1577 }
26d92f92
SP
1578
1579 prev_page_info = page_info;
1580 queue_head_inc(rxq);
10ef9ab4 1581 page_info = &rxo->page_info_tbl[rxq->head];
6b7c5b94
SP
1582 }
1583 if (pagep)
26d92f92 1584 prev_page_info->last_page_user = true;
6b7c5b94
SP
1585
1586 if (posted) {
6b7c5b94 1587 atomic_add(posted, &rxq->used);
8788fdc2 1588 be_rxq_notify(adapter, rxq->id, posted);
ea1dae11
SP
1589 } else if (atomic_read(&rxq->used) == 0) {
1590 /* Let be_worker replenish when memory is available */
3abcdeda 1591 rxo->rx_post_starved = true;
6b7c5b94 1592 }
6b7c5b94
SP
1593}
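/* Worked example of the fragment accounting above (illustrative numbers,
 * assuming rx_frag_size = 2048 and big_page_size = 8192): frag offsets run
 * 0, 2048, 4096, 6144; the "space left?" test fails first at offset 6144
 * (6144 + 2 * 2048 > 8192), so that fourth fragment gets
 * last_page_user = true and the next iteration allocates a fresh page. */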

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
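/* num_wrbs starts at 1 to cover the header wrb that accompanies every
 * skb; the do/while then frees one wrb per data fragment up to and
 * including last_index. An skb posted with a header wrb plus 3 fragment
 * wrbs therefore returns num_wrbs = 4 ring entries to the txq. */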

/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}

static int event_handle(struct be_eq_obj *eqo)
{
	bool rearm = false;
	int num = events_get(eqo);

	/* Deal with any spurious interrupts that come without events */
	if (!num)
		rearm = true;

	if (num || msix_enabled(eqo->adapter))
		be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);

	if (num)
		napi_schedule(&eqo->napi);

	return num;
}
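/* be_eq_notify() here both acks the consumed entries ("num") and decides
 * whether to re-arm the EQ. Re-arming only on a spurious (zero-event)
 * interrupt keeps the EQ quiet while NAPI is scheduled; be_poll() re-arms
 * it when polling completes under budget. */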

/* Leaves the EQ in disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
}

static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(rxo, rxcp);
		be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
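/* The tail computation above locates the oldest still-posted buffer:
 * e.g. with rxq->len = 256, head = 10 and 14 buffers in use,
 * tail = (10 + 256 - 14) % 256 = 252, so entries 252..255 and 0..9
 * (14 in all) are reclaimed. */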

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx =
				    AMAP_GET_BITS(struct amap_eth_tx_compl,
						  wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				cmpl = 0;
				num_wrbs = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
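/* The loop above bounds the wait at roughly 200 ms (200 iterations of
 * mdelay(1)); whatever is still outstanding afterwards is treated as a
 * lost completion and reclaimed straight from sent_skb_list in the
 * second pass. */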

static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
		}
		be_queue_free(adapter, &eqo->q);
	}
}

static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	int i, rc;

	adapter->num_evt_qs = num_irqs(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		eqo->adapter = adapter;
		eqo->tx_budget = BE_TX_BUDGET;
		eqo->idx = i;
		eqo->max_eqd = BE_MAX_EQD;
		eqo->enable_aic = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
		if (rc)
			return rc;
	}
	return 0;
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
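/* The labels above form the usual create/teardown ladder: each failure
 * jumps to a label that undoes only the steps already completed, in
 * reverse order (mccq alloc -> cq destroy -> cq free). */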

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
	    be_is_mc(adapter) ||
	    (!lancer_chip(adapter) && !be_physfn(adapter)) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return adapter->max_tx_queues;
}

static int be_tx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	int status;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS) {
		rtnl_lock();
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
		rtnl_unlock();
	}

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq shares an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;
	}
	return 0;
}
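/* CQ-to-EQ placement is a plain modulo stride: e.g. with 8 tx queues and
 * 4 event queues, txq0/txq4 land on eq0, txq1/txq5 on eq1, and so on. */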

static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	int i, status;

	for_all_tx_queues(adapter, txo, i) {
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}

static void be_rx_cqs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}

static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We'll create as many RSS rings as there are irqs.
	 * But when there's only one irq there's no use creating RSS rings
	 */
	adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
				num_irqs(adapter) + 1 : 1;
	if (adapter->num_rx_qs != MAX_RX_QS) {
		rtnl_lock();
		netif_set_real_num_rx_queues(adapter->netdev,
					     adapter->num_rx_qs);
		rtnl_unlock();
	}

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
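/* With more than one irq this yields num_irqs RSS rings plus the default
 * (non-RSS) ring, hence the num_irqs + 1 above; with a single irq only
 * the default ring exists and RSS is not used. */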

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int num_evts;

	/* With INTx only one EQ is used */
	num_evts = event_handle(&adapter->eq_obj[0]);
	if (num_evts)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	event_handle(eqo);
	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			int budget)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
			be_post_rx_frags(rxo, GFP_ATOMIC);
	}

	return work_done;
}
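/* Refill is deferred until the posted-buffer count falls below
 * RX_FRAGS_REFILL_WM, so be_post_rx_frags() runs in batches from softirq
 * context (GFP_ATOMIC) instead of once per completion. */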

static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			int budget, int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		txcp = be_tx_compl_get(&txo->cq);
		if (!txcp)
			break;
		num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
			atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
	return (work_done < budget); /* Done */
}
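/* The q.len / 2 threshold adds hysteresis: a subqueue stopped earlier for
 * lack of wrbs is only woken once at least half the ring is free again,
 * which avoids rapid stop/wake ping-pong under load. */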

int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i;
	bool tx_done;

	/* Process all TXQs serviced by this EQ */
	for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
		tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
					eqo->tx_budget, i);
		if (!tx_done)
			max_work = budget;
	}

	/* This loop will iterate twice for EQ0 in which
	 * completions of the last RXQ (default one) are also processed.
	 * For other EQs the loop iterates only once
	 */
	for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
		work = be_process_rx(&adapter->rx_obj[i], napi, budget);
		max_work = max(work, max_work);
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
	}
	return max_work;
}
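/* Queue-to-EQ striding: an EQ with index e services txq/rxq numbers
 * e, e + num_evt_qs, e + 2 * num_evt_qs, ... For example with 4 EQs and
 * 5 rx queues, eq0 handles rxq0 and rxq4 (the default ring), which is the
 * "iterates twice for EQ0" case noted in be_poll(). */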

void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (be_crit_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->hw_error = true;
		dev_err(&adapter->pdev->dev,
			"Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"ERR: sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
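/* The UE decode first drops bits the firmware masks as expected
 * (ue_lo &= ~ue_lo_mask) and then walks the survivors bit by bit; e.g. a
 * residual value of 0x5 reports ue_status_low_desc[0] and
 * ue_status_low_desc[2]. */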

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static uint be_num_rss_want(struct be_adapter *adapter)
{
	u32 num = 0;

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    (lancer_chip(adapter) ||
	     (!sriov_want(adapter) && be_physfn(adapter)))) {
		num = adapter->max_rss_queues;
		num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
	}
	return num;
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS 1
	int i, status, num_vec, num_roce_vec = 0;
	struct device *dev = &adapter->pdev->dev;

	/* If RSS queues are not used, need a vec for default RX Q */
	num_vec = min(be_num_rss_want(adapter), num_online_cpus());
	if (be_roce_supported(adapter)) {
		num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
					(num_online_cpus() + 1));
		num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
		num_vec += num_roce_vec;
		num_vec = min(num_vec, MAX_MSIX_VECTORS);
	}
	num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}

	dev_warn(dev, "MSIx enable failed\n");
	return;
done:
	if (be_roce_supported(adapter)) {
		if (num_vec > num_roce_vec) {
			adapter->num_msix_vec = num_vec - num_roce_vec;
			adapter->num_msix_roce_vec =
				num_vec - adapter->num_msix_vec;
		} else {
			adapter->num_msix_vec = num_vec;
			adapter->num_msix_roce_vec = 0;
		}
	} else
		adapter->num_msix_vec = num_vec;
	dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
	return;
}
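/* pci_enable_msix() in this era returns 0 on success or, on failure, a
 * positive count of vectors that could still be allocated; the code above
 * retries once with that reduced count before giving up and letting the
 * driver fall back to INTx. */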

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->idx].vector;
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	be_roce_dev_close(adapter);

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_evt_queues(adapter, eqo, i) {
		napi_disable(&eqo->napi);
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);
	return 0;
}

static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rsstable[128];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= 128)
					break;
				rsstable[j + i] = rxo->rss_id;
			}
		}
		rc = be_cmd_rss_config(adapter, rsstable, 128);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL);
	return 0;
}
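/* The 128-slot RSS indirection table is filled round-robin: with 4 RSS
 * rings the outer loop advances j in steps of 4 while the inner loop
 * writes the rss_id of rings 0..3 into slots j..j+3, so each ring owns
 * every 4th slot. */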

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_eq_notify(adapter, eqo->q.id, true, false, 0);
	}

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	be_roce_dev_open(adapter);
	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter)) {
			status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
		} else {
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		}

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n", vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
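/* VF MACs are derived from the seed by bumping only the last octet
 * (mac[5] += 1): a seed ending in :a0 goes to the first VF, :a1 to the
 * second, and so on; the supported VF counts stay far below the 256
 * values that octet can hold. */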

static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (be_find_vfs(adapter, ASSIGNED)) {
		dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
		goto done;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		if (lancer_chip(adapter))
			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
		else
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
	pci_disable_sriov(adapter->pdev);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
}

static int be_clear(struct be_adapter *adapter)
{
	int i = 1;

	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[i], 0);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);

	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_msix_disable(adapter);
	return 0;
}

static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
				   u32 *cap_flags, u8 domain)
{
	bool profile_present = false;
	int status;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_profile_config(adapter, cap_flags, domain);
		if (!status)
			profile_present = true;
	}

	if (!profile_present)
		*cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			     BE_IF_FLAGS_MULTICAST;
}

static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	struct device *dev = &adapter->pdev->dev;
	u32 cap_flags, en_flags, vf;
	u16 def_vlan, lnk_speed;
	int status, enabled_vfs;

	enabled_vfs = be_find_vfs(adapter, ENABLED);
	if (enabled_vfs) {
		dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
		dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
		return 0;
	}

	if (num_vfs > adapter->dev_num_vfs) {
		dev_warn(dev, "Device supports %d VFs and not %d\n",
			 adapter->dev_num_vfs, num_vfs);
		num_vfs = adapter->dev_num_vfs;
	}

	status = pci_enable_sriov(adapter->pdev, num_vfs);
	if (!status) {
		adapter->num_vfs = num_vfs;
	} else {
		/* Platform doesn't support SRIOV though device supports it */
		dev_warn(dev, "SRIOV enable failed\n");
		return 0;
	}

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	for_all_vfs(adapter, vf_cfg, vf) {
		be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);

		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);

		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}

	if (!enabled_vfs) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		lnk_speed = 1000;
		status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
		if (status)
			goto err;
		vf_cfg->tx_rate = lnk_speed * 10;

		status = be_cmd_get_hsw_config(adapter, &def_vlan,
				vf + 1, vf_cfg->if_handle);
		if (status)
			goto err;
		vf_cfg->def_vid = def_vlan;

		be_cmd_enable_vf(adapter, vf + 1);
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
			   bool *active_mac, u32 *pmac_id)
{
	int status = 0;

	if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
		if (!lancer_chip(adapter) && !be_physfn(adapter))
			*active_mac = true;
		else
			*active_mac = false;

		return status;
	}

	if (lancer_chip(adapter)) {
		status = be_cmd_get_mac_from_list(adapter, mac,
						  active_mac, pmac_id, 0);
		if (*active_mac) {
			status = be_cmd_mac_addr_query(adapter, mac, false,
						       if_handle, *pmac_id);
		}
	} else if (be_physfn(adapter)) {
		/* For BE3, for PF get permanent MAC */
		status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
		*active_mac = false;
	} else {
		/* For BE3, for VF get soft MAC assigned by PF */
		status = be_cmd_mac_addr_query(adapter, mac, false,
					       if_handle, 0);
		*active_mac = true;
	}
	return status;
}

static void be_get_resources(struct be_adapter *adapter)
{
	int status;
	bool profile_present = false;

	if (lancer_chip(adapter)) {
		status = be_cmd_get_func_config(adapter);

		if (!status)
			profile_present = true;
	}

	if (profile_present) {
		/* Sanity fixes for Lancer */
		adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
					      BE_UC_PMAC_COUNT);
		adapter->max_vlans = min_t(u16, adapter->max_vlans,
					   BE_NUM_VLANS_SUPPORTED);
		adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
					       BE_MAX_MC);
		adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
					       MAX_TX_QS);
		adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
						BE3_MAX_RSS_QS);
		adapter->max_event_queues = min_t(u16,
						  adapter->max_event_queues,
						  BE3_MAX_RSS_QS);

		if (adapter->max_rss_queues &&
		    adapter->max_rss_queues == adapter->max_rx_queues)
			adapter->max_rss_queues -= 1;

		if (adapter->max_event_queues < adapter->max_rss_queues)
			adapter->max_rss_queues = adapter->max_event_queues;

	} else {
		if (be_physfn(adapter))
			adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
		else
			adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;

		if (adapter->function_mode & FLEX10_MODE)
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

		adapter->max_mcast_mac = BE_MAX_MC;
		adapter->max_tx_queues = MAX_TX_QS;
		adapter->max_rss_queues = (adapter->be3_native) ?
					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
		adapter->max_event_queues = BE3_MAX_RSS_QS;

		adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS |
					BE_IF_FLAGS_MCAST_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS |
					BE_IF_FLAGS_PROMISCUOUS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
			adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
	}
}

/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
	int pos, status;
	u16 dev_num_vfs;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
				     &adapter->function_mode,
				     &adapter->function_caps);
	if (status)
		goto err;

	be_get_resources(adapter);

	/* primary mac needs 1 pmac entry */
	adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
				   sizeof(u32), GFP_KERNEL);
	if (!adapter->pmac_id) {
		status = -ENOMEM;
		goto err;
	}

	pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &dev_num_vfs);
		if (!lancer_chip(adapter))
			dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
		adapter->dev_num_vfs = dev_num_vfs;
	}
err:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];
	bool active_mac;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	be_msix_enable(adapter);

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
	/* In UMC mode FW does not return right privileges.
	 * Override with correct privilege equivalent to PF.
	 */
	if (be_is_mc(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;

	en_flags = en_flags & adapter->if_cap_flags;

	status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
				  &adapter->if_handle, 0);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	active_mac = false;
	status = be_get_mac_addr(adapter, mac, adapter->if_handle,
				 &active_mac, &adapter->pmac_id[0]);
	if (status != 0)
		goto err;

	if (!active_mac) {
		status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status != 0)
			goto err;
	}

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter) && num_vfs) {
		if (adapter->dev_num_vfs)
			be_vf_setup(adapter);
		else
			dev_warn(dev, "device doesn't support SRIOV\n");
	}

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i)
		event_handle(eqo);

	return;
}
#endif
#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};

static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* Update redboot only if the CRC does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (adapter->generation != BE_GEN3)
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
					 int header_size,
					 const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

static int be_flash(struct be_adapter *adapter, const u8 *img,
		struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	u32 total_bytes = 0, flash_op, num_bytes = 0;
	int status = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->params.data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
						flash_op, num_bytes);
		if (status) {
			if (status == ILLEGAL_IOCTL_REQ &&
			    optype == OPTYPE_PHY_FW)
				break;
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed.\n");
			return status;
		}
	}
	return 0;
}
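/* Images are streamed in 32KB pieces: every chunk except the last is sent
 * with a SAVE op (staged in adapter-side buffer memory) and the final
 * chunk switches to a FLASH op, which commits the whole staged image to
 * flash in one operation. */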

static int be_flash_data(struct be_adapter *adapter,
			 const struct firmware *fw,
			 struct be_dma_mem *flash_cmd,
			 int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	const u8 *p = fw->data;
	const struct flash_comp *pflashcomp;
	int num_comp, redboot;
	struct flash_section_info *fsec = NULL;

	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid cookie. UFI corrupted?\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
				pflashcomp[i].offset, pflashcomp[i].size,
				filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
					pflashcomp[i].size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

static int be_flash_skyhawk(struct be_adapter *adapter,
		const struct firmware *fw,
		struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	int img_offset, img_size, img_optype, redboot;
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	const u8 *p = fw->data;
	struct flash_section_info *fsec = NULL;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(&adapter->pdev->dev,
			"Invalid cookie. UFI corrupted?\n");
		return -1;
	}

	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);

		switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
		case IMAGE_FIRMWARE_iSCSI:
			img_optype = OPTYPE_ISCSI_ACTIVE;
			break;
		case IMAGE_BOOT_CODE:
			img_optype = OPTYPE_REDBOOT;
			break;
		case IMAGE_OPTION_ROM_ISCSI:
			img_optype = OPTYPE_BIOS;
			break;
		case IMAGE_OPTION_ROM_PXE:
			img_optype = OPTYPE_PXE_BIOS;
			break;
		case IMAGE_OPTION_ROM_FCoE:
			img_optype = OPTYPE_FCOE_BIOS;
			break;
		case IMAGE_FIRMWARE_BACKUP_iSCSI:
			img_optype = OPTYPE_ISCSI_BACKUP;
			break;
		case IMAGE_NCSI:
			img_optype = OPTYPE_NCSI_FW;
			break;
		default:
			continue;
		}

		if (img_optype == OPTYPE_REDBOOT) {
			redboot = be_flash_redboot(adapter, fw->data,
					img_offset, img_size,
					filehdr_size + img_hdrs_size);
			if (!redboot)
				continue;
		}

		p = fw->data;
		p += filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Flashing section type %d failed.\n",
				le32_to_cpu(fsec->fsec_entry[i].type));
			return status;
		}
	}
	return 0;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_fw_reset(struct be_adapter *adapter)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
		  PHYSDEV_CONTROL_OFFSET);

	return status;
}

static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	if (change_status == LANCER_FW_RESET_NEEDED) {
		status = lancer_fw_reset(adapter);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter busy for FW reset.\n"
				"New FW will not be active.\n");
			goto lancer_fw_exit;
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_err(&adapter->pdev->dev,
			"System reboot required for new FW"
			" to be active\n");
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
3443
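/* Determine the controller generation a UFI image was built for. Judging
 * by the checks below, the first character of the build string encodes
 * the target: '2' for BE2, '3' for BE3 and '4' for Skyhawk; -1 means the
 * image does not match this adapter.
 */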
static int be_get_ufi_gen(struct be_adapter *adapter,
			  struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		goto be_get_ufi_exit;

	if (adapter->generation == BE_GEN3) {
		if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
			return SH_HW;
		else if (!skyhawk_chip(adapter) && fhdr->build[0] == '3')
			return BE_GEN3;
	} else if (adapter->generation == BE_GEN2 && fhdr->build[0] == '2') {
		return BE_GEN2;
	}

be_get_ufi_exit:
	dev_err(&adapter->pdev->dev,
		"UFI and Interface are not compatible for flashing\n");
	return -1;
}

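/* Flash a UFI image on BE2/BE3/Skyhawk adapters: allocate a DMA buffer
 * for the flashrom commands, match the UFI generation against the
 * adapter, then dispatch to the generation-specific flash routine.
 */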
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *)p;

	ufi_type = be_get_ufi_gen(adapter, fhdr);

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			if (ufi_type == SH_HW)
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
			else if (ufi_type == BE_GEN3)
				status = be_flash_data(adapter, fw,
						       &flash_cmd, num_imgs);
		}
	}

	if (ufi_type == BE_GEN2)
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -1;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

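/* Entry point for firmware flashing (reached via ethtool's flash-device
 * operation): fetch the image from userspace with request_firmware() and
 * hand it to the Lancer- or BE-specific download routine. A sketch of a
 * call site, with a purely hypothetical file name:
 *
 *	status = be_load_fw(adapter, "be3_fw.ufi");
 */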
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
};

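/* Advertise the offloads this driver supports (scatter-gather, TSO,
 * checksumming, VLAN acceleration, and RX hashing on multi-queue setups)
 * and register a NAPI poll handler for every event queue.
 */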
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_evt_queues(adapter, eqo, i)
		netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->roce_db.base)
		pci_iounmap(adapter->pdev, adapter->roce_db.base);
}

static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;

	addr = pci_iomap(pdev, 2, 0);
	if (addr == NULL)
		return -ENOMEM;

	adapter->roce_db.base = addr;
	adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
	adapter->roce_db.size = 8192;
	adapter->roce_db.total_size = pci_resource_len(pdev, 2);
	return 0;
}

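/* Map the BARs this function uses: on Lancer the doorbell BAR (plus the
 * RoCE BAR on SLI type-3 functions); on BE2/BE3 the CSR BAR for PFs and
 * a doorbell BAR whose number depends on the generation and on whether
 * the function is a PF or a VF.
 */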
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		if (be_type_2_3(adapter)) {
			addr = ioremap_nocache(
					pci_resource_start(adapter->pdev, 0),
					pci_resource_len(adapter->pdev, 0));
			if (addr == NULL)
				return -ENOMEM;
			adapter->db = addr;
		}
		if (adapter->if_type == SLI_INTF_TYPE_3) {
			if (lancer_roce_map_pci_bars(adapter))
				goto pci_map_err;
		}
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				       pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
			       pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr =
				pci_resource_start(adapter->pdev, db_reg);
		adapter->roce_db.total_size =
				pci_resource_len(adapter->pdev, db_reg);
	}
	return 0;

pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

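/* Set up the structures used to talk to the firmware: the mailbox buffer
 * (over-allocated by 16 bytes so it can be kept 16-byte aligned with
 * PTR_ALIGN), the RX filter command buffer, and the locks that serialize
 * mailbox and MCC access.
 */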
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					   &rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

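/* Allocate the DMA buffer for firmware statistics queries; its size
 * depends on which stats command the chip speaks: v0 on BE2, the pport
 * stats command on Lancer, v1 otherwise.
 */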
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

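/* Undo be_probe() in reverse order: stop the recovery worker, unregister
 * the netdev, release rings and firmware state, then free the PCI
 * resources.
 */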
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

bool be_is_wol_supported(struct be_adapter *adapter)
{
	return (adapter->wol_cap & BE_WOL_CAP) &&
		!be_is_wol_excluded(adapter);
}

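/* Read the UART trace level from the firmware's extended FAT
 * capabilities. Returns 0 on Lancer (which skips the query) and on any
 * failure.
 */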
u32 be_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status;
	u32 level = 0;
	int j;

	if (lancer_chip(adapter))
		return 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
					     &extfat_cmd.dma);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));
		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
			    extfat_cmd.dma);
err:
	return level;
}

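/* One-time queries issued at probe: controller attributes, WOL
 * capability (falling back to the exclusion list when the query fails)
 * and the firmware log level, which seeds the netif message mask.
 */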
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status;
	u32 level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_get_acpi_wol_cap(adapter);
	if (status) {
		/* in case of a failure to get WOL capabilities, check
		 * the exclusion list to determine WOL capability */
		if (!be_is_wol_excluded(adapter))
			adapter->wol_cap |= BE_WOL_CAP;
	}

	if (be_is_wol_supported(adapter))
		adapter->wol = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	level = be_get_fw_log_level(adapter);
	adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;

	return 0;
}

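/* Derive the controller generation and interface type from the PCI
 * device ID, validating the SLI_INTF register on the SLI-capable parts.
 */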
static int be_dev_type_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    !be_type_2_3(adapter)) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
	return 0;
}

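/* Recover a hung Lancer function in place: wait for the chip to report
 * ready, tear the function down with be_clear(), rebuild it with
 * be_setup() and reopen the interface if it was running.
 */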
static int lancer_recover_func(struct be_adapter *adapter)
{
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	adapter->hw_error = false;
	adapter->fw_timeout = false;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(&adapter->pdev->dev,
		 "Adapter SLIPORT recovery succeeded\n");
	return 0;
err:
	if (adapter->eeh_error)
		dev_err(&adapter->pdev->dev,
			"Adapter SLIPORT recovery failed\n");

	return status;
}

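/* Periodic (1s) error watchdog: on Lancer, detach the netdev and attempt
 * SLIPORT recovery whenever a hardware error is detected.
 */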
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		if (adapter->eeh_error)
			goto out;

		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);

		if (!status)
			netif_device_attach(adapter->netdev);
	}

out:
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
}

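/* Main 1s housekeeping worker: reap MCC completions while the interface
 * is down, refresh stats and die temperature, replenish starved RX
 * queues and adapt event-queue interrupt delays.
 */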
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_eq_obj *eqo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

	for_all_evt_queues(adapter, eqo, i)
		be_eqd_update(adapter, eqo);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static bool be_reset_required(struct be_adapter *adapter)
{
	return be_find_vfs(adapter, ENABLED) <= 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

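/* PCI probe: enable the device, map its BARs, sync with the firmware's
 * ready state, reset the function when no VFs are enabled, create the
 * rings via be_setup() and register the netdev. The error labels unwind
 * in reverse order of setup.
 */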
static int __devinit be_probe(struct pci_dev *pdev,
			      const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_type_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (status)
		dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

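/* Legacy PM suspend: arm WOL if enabled, quiesce the interface and tear
 * down the rings before powering the device down; be_resume() rebuilds
 * the same state.
 */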
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol)
		be_setup_wol(adapter, true);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

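/* EEH (PCI error) handling: detach and clean up on error detection, wait
 * out any in-progress flash dump before allowing a reset, re-initialize
 * the function on slot reset, and rebuild the rings on resume.
 */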
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_error = true;

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	rtnl_lock();
	netif_device_detach(netdev);
	rtnl_unlock();

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress
	 * can cause it not to recover, so wait for the dump to finish.
	 * Wait only on the first function, as this is needed only once
	 * per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	be_clear_all_error(adapter);

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

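/* Validate the rx_frag_size module parameter (only 2048/4096/8192-byte
 * fragments are supported) before registering the PCI driver.
 */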
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);