/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -1;
        memset(mem->va, 0, mem->size);
        return 0;
}

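/* Enable or disable host interrupt delivery by toggling the hostintr bit
 * in the membar control register. Skipped while an EEH error is pending,
 * since the device may be inaccessible.
 */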
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
        u32 reg = ioread32(addr);
        u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (adapter->eeh_err)
                return;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        iowrite32(reg, addr);
}

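/* The be_*_notify() helpers below ring the doorbell registers for the RX,
 * TX, event and completion queues. The wmb() in the RQ/TXQ paths ensures
 * that descriptor writes are visible to the device before the doorbell
 * is written.
 */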
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* MAC addr configuration will be done in hardware for VFs
         * by their corresponding PFs. Just copy to netdev addr here
         */
        if (!be_physfn(adapter))
                goto netdev_addr;

        status = be_cmd_pmac_del(adapter, adapter->if_handle,
                        adapter->pmac_id, 0);
        if (status)
                return status;

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

        return status;
}

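/* The populate_*_stats() helpers below copy hardware stats from the
 * firmware command response buffer into the driver's common be_drv_stats
 * layout; BE2, BE3 and Lancer each use a different response format.
 */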
static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                pport_stats->rx_drops_too_many_frags_lo;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i)
                rx_stats(rxo)->rx_drops_no_frags =
                        erx->rx_drops_no_fragments[rxo->q.id];
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                        bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

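/* wrb_fill() programs a single fragment descriptor; wrb_fill_hdr() builds
 * the header WRB that describes the whole packet: LSO, checksum offload
 * and VLAN tag insertion.
 */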
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u8 vlan_prio = 0;
        u16 vlan_tag = 0;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                        tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                        udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = vlan_tx_tag_get(skb);
                vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                /* If vlan priority provided by OS is NOT in available bmap */
                if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                        vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                        adapter->recommended_prio;
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

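/* unmap_tx_frag() undoes the DMA mapping of one WRB; make_tx_wrbs() maps
 * the skb head and frags, filling one WRB per fragment plus an optional
 * dummy WRB to keep the count even. On a DMA error the queue head is
 * rewound and all mappings done so far are unwound.
 */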
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = dma_map_page(dev, frag->page, frag->page_offset,
                                       frag->size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += frag->size;
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

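/* Adaptive interrupt coalescing: once a second, derive the RX
 * packets-per-second rate and map it to a new EQ delay (eqd) within the
 * [min_eqd, max_eqd] bounds configured for this event queue.
 */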
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

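/* A received "big page" is shared by several RX frags. last_page_user
 * marks the frag that owns the DMA mapping; the page is unmapped only
 * when that frag is consumed.
 */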
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_shinfo(skb)->frags[0].page = page_info->page;
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
                skb->data_len = curr_frag_len - hdr_len;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_shinfo(skb)->frags[j].size += curr_frag_len;
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_shinfo(skb)->frags[j].page = page_info->page;
                        skb_shinfo(skb)->frags[j].page_offset =
                                                page_info->page_offset;
                        skb_shinfo(skb)->frags[j].size = 0;
                } else {
                        put_page(page_info->page);
                }
                skb_shinfo(skb)->frags[j].size += curr_frag_len;

                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->truesize += rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (unlikely(rxcp->vlanf))
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
                                               vlan_tag, compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is Ok to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & 0x400) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we wont touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

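/* Allocate a (possibly compound) page of the requested size; order > 0
 * pages get __GFP_COMP so they can be split across several RX frags via
 * get_page()/put_page() refcounting.
 */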
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
                return NULL;

        rmb();
        be_dws_le_to_cpu(txcp, sizeof(*txcp));

        txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

        queue_tail_inc(tx_cq);
        return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
                struct be_tx_obj *txo, u16 last_index)
{
        struct be_queue_info *txq = &txo->q;
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct sk_buff *sent_skb;
        u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
        bool unmap_skb_hdr = true;

        sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
        sent_skbs[txq->tail] = NULL;

        /* skip header wrb */
        queue_tail_inc(txq);

        do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
                unmap_tx_frag(&adapter->pdev->dev, wrb,
                              (unmap_skb_hdr && skb_headlen(sent_skb)));
                unmap_skb_hdr = false;

                num_wrbs++;
                queue_tail_inc(txq);
        } while (cur_index != last_index);

        kfree_skb(sent_skb);
        return num_wrbs;
}

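/* event_get() returns the next valid entry on the event queue, or NULL
 * when the queue is empty; event_handle() drains all pending events,
 * re-arms the EQ and kicks NAPI if any work was found.
 */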
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

        if (!eqe->evt)
                return NULL;

        rmb();
        eqe->evt = le32_to_cpu(eqe->evt);
        queue_tail_inc(&eq_obj->q);
        return eqe;
}

static int event_handle(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj,
                        bool rearm)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        /* Deal with any spurious interrupts that come
         * without events
         */
        if (!num)
                rearm = true;

        be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
        if (num)
                napi_schedule(&eq_obj->napi);

        return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
                        struct be_eq_obj *eq_obj)
{
        struct be_eq_entry *eqe;
        u16 num = 0;

        while ((eqe = event_get(eq_obj)) != NULL) {
                eqe->evt = 0;
                num++;
        }

        if (num)
                be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        u16 tail;

        /* First cleanup pending rx completions */
        while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                be_cq_notify(adapter, rx_cq->id, false, 1);
        }

        /* Then free posted rx buffers that were not used */
        tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
        for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
                page_info = get_rx_page_info(adapter, rxo, tail);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(atomic_read(&rxq->used));
        rxq->tail = rxq->head = 0;
}

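/* Drain TX completions at teardown: poll for up to 200ms, then reclaim
 * any posted WRBs whose completions will never arrive.
 */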
static void be_tx_compl_clean(struct be_adapter *adapter,
                                struct be_tx_obj *txo)
{
        struct be_queue_info *tx_cq = &txo->cq;
        struct be_queue_info *txq = &txo->q;
        struct be_eth_tx_compl *txcp;
        u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
        struct sk_buff **sent_skbs = txo->sent_skb_list;
        struct sk_buff *sent_skb;
        bool dummy_wrb;

        /* Wait for a max of 200ms for all the tx-completions to arrive. */
        do {
                while ((txcp = be_tx_compl_get(tx_cq))) {
                        end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                                        wrb_index, txcp);
                        num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
                        cmpl++;
                }
                if (cmpl) {
                        be_cq_notify(adapter, tx_cq->id, false, cmpl);
                        atomic_sub(num_wrbs, &txq->used);
                        cmpl = 0;
                        num_wrbs = 0;
                }

                if (atomic_read(&txq->used) == 0 || ++timeo > 200)
                        break;

                mdelay(1);
        } while (true);

        if (atomic_read(&txq->used))
                dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
                        atomic_read(&txq->used));

        /* free posted tx for which compls will never arrive */
        while (atomic_read(&txq->used)) {
                sent_skb = sent_skbs[txq->tail];
                end_idx = txq->tail;
                index_adv(&end_idx,
                        wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
                        txq->len);
                num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
                atomic_sub(num_wrbs, &txq->used);
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

        q = &adapter->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &adapter->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(adapter, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;

        /* Alloc MCC compl queue */
        cq = &adapter->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_compl)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &adapter->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(adapter, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}

6b7c5b94
SP
1591static void be_tx_queues_destroy(struct be_adapter *adapter)
1592{
1593 struct be_queue_info *q;
3c8def97
SP
1594 struct be_tx_obj *txo;
1595 u8 i;
6b7c5b94 1596
3c8def97
SP
1597 for_all_tx_queues(adapter, txo, i) {
1598 q = &txo->q;
1599 if (q->created)
1600 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1601 be_queue_free(adapter, q);
6b7c5b94 1602
3c8def97
SP
1603 q = &txo->cq;
1604 if (q->created)
1605 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1606 be_queue_free(adapter, q);
1607 }
6b7c5b94 1608
859b1e4e
SP
1609 /* Clear any residual events */
1610 be_eq_clean(adapter, &adapter->tx_eq);
1611
6b7c5b94
SP
1612 q = &adapter->tx_eq.q;
1613 if (q->created)
8788fdc2 1614 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
6b7c5b94
SP
1615 be_queue_free(adapter, q);
1616}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
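
/* Queue-count policy for the function below: multiple RX queues are asked
 * for only when the function reports RSS capability and neither SR-IOV nor
 * the 0x400 function-mode bit is active. The 0x400 constant is presumably
 * the multi-channel (FLEX10-style) mode flag; it is tested as a raw value
 * here and again in be_get_config() further down.
 */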

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	return eqe->evt != 0;
}
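
/* Legacy-interrupt handler below: Lancer has no CEV_ISR register, so pending
 * events are detected by peeking the EQs directly; on BEx the per-function
 * ISR word is read instead and carries one pending bit per EQ, indexed by
 * the eq_idx assigned at EQ-creation time.
 */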

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data? */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
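
/* The RX poller above honours the NAPI budget and only re-arms its CQ when
 * everything was consumed; the TX/MCC poller below deliberately ignores the
 * budget, drains everything, and always reports a work count of 1,
 * presumably because TX/MCC completion processing is cheap by comparison.
 */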

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
		PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
		PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
		PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
		PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & ~ue_status_lo_mask);
	ue_status_hi = (ue_status_hi & ~ue_status_hi_mask);

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n",
					ue_status_hi_desc[i]);
		}
	}
}
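
/* In be_detect_dump_ue() above, a set bit in the UE mask registers marks an
 * error source that should be ignored, so masked-off bits are cleared from
 * the status words before deciding that an unrecoverable error (UE) has
 * occurred and which sources to name in the log.
 */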

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
				&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

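/* be_msix_enable() relies on the old pci_enable_msix() contract: a return of
 * 0 means all requested vectors were allocated, while a positive return is
 * the maximum number of vectors the platform can provide, so the request is
 * retried once with that smaller count (as long as it still covers Rx + Tx).
 */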
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}
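
/* be_close() below tears down in dependency order: stop async MCC
 * processing, mask the interrupt, quiesce NAPI, ack any Lancer CQ entries
 * without re-arming, synchronize and free the IRQs, drain pending TX
 * completions, and only then flush the RX queues.
 */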

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-LAN\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
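
/* Note: the loop above increments only the last MAC octet, which assumes
 * fewer than 256 VFs so that a carry into mac[4] is never needed; num_vfs is
 * already clamped to the device's TotalVFs in be_sriov_enable().
 */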

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}
5fb379ee
SP
2435static int be_setup(struct be_adapter *adapter)
2436{
5fb379ee 2437 struct net_device *netdev = adapter->netdev;
ba343c77 2438 u32 cap_flags, en_flags, vf = 0;
6b7c5b94 2439 int status;
ba343c77
SB
2440 u8 mac[ETH_ALEN];
2441
2dc1deb6
SP
2442 be_cmd_req_native_mode(adapter);
2443
f21b538c
PR
2444 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2445 BE_IF_FLAGS_BROADCAST |
2446 BE_IF_FLAGS_MULTICAST;
6b7c5b94 2447
ba343c77
SB
2448 if (be_physfn(adapter)) {
2449 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2450 BE_IF_FLAGS_PROMISCUOUS |
2451 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2452 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
3abcdeda 2453
ac6a0c4a 2454 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
3abcdeda
SP
2455 cap_flags |= BE_IF_FLAGS_RSS;
2456 en_flags |= BE_IF_FLAGS_RSS;
2457 }
ba343c77 2458 }
73d540f2
SP
2459
2460 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2461 netdev->dev_addr, false/* pmac_invalid */,
ba343c77 2462 &adapter->if_handle, &adapter->pmac_id, 0);
6b7c5b94
SP
2463 if (status != 0)
2464 goto do_none;
2465
ba343c77 2466 if (be_physfn(adapter)) {
c99ac3e7
AK
2467 if (adapter->sriov_enabled) {
2468 while (vf < num_vfs) {
2469 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
2470 BE_IF_FLAGS_BROADCAST;
2471 status = be_cmd_if_create(adapter, cap_flags,
2472 en_flags, mac, true,
64600ea5 2473 &adapter->vf_cfg[vf].vf_if_handle,
ba343c77 2474 NULL, vf+1);
c99ac3e7
AK
2475 if (status) {
2476 dev_err(&adapter->pdev->dev,
2477 "Interface Create failed for VF %d\n",
2478 vf);
2479 goto if_destroy;
2480 }
2481 adapter->vf_cfg[vf].vf_pmac_id =
2482 BE_INVALID_PMAC_ID;
2483 vf++;
ba343c77 2484 }
84e5b9f7 2485 }
c99ac3e7 2486 } else {
ba343c77
SB
2487 status = be_cmd_mac_addr_query(adapter, mac,
2488 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2489 if (!status) {
2490 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2491 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2492 }
2493 }
2494
6b7c5b94
SP
2495 status = be_tx_queues_create(adapter);
2496 if (status != 0)
2497 goto if_destroy;
2498
2499 status = be_rx_queues_create(adapter);
2500 if (status != 0)
2501 goto tx_qs_destroy;
2502
2903dd65
SP
2503 /* Allow all priorities by default. A GRP5 evt may modify this */
2504 adapter->vlan_prio_bmap = 0xff;
2505
5fb379ee
SP
2506 status = be_mcc_queues_create(adapter);
2507 if (status != 0)
2508 goto rx_qs_destroy;
6b7c5b94 2509
0dffc83e
AK
2510 adapter->link_speed = -1;
2511
6b7c5b94
SP
2512 return 0;
2513
5fb379ee
SP
2514rx_qs_destroy:
2515 be_rx_queues_destroy(adapter);
6b7c5b94
SP
2516tx_qs_destroy:
2517 be_tx_queues_destroy(adapter);
2518if_destroy:
c99ac3e7
AK
2519 if (be_physfn(adapter) && adapter->sriov_enabled)
2520 for (vf = 0; vf < num_vfs; vf++)
2521 if (adapter->vf_cfg[vf].vf_if_handle)
2522 be_cmd_if_destroy(adapter,
658681f7
AK
2523 adapter->vf_cfg[vf].vf_if_handle,
2524 vf + 1);
2525 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
6b7c5b94
SP
2526do_none:
2527 return status;
2528}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	adapter->be3_native = 0;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;

	return (phy_info.phy_type == TN_8022 &&
		phy_info.interface_type == PHY_TYPE_BASET_10GB);
}
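
/* be_flash_data() below drives legacy (non-Lancer) flashing: a
 * per-generation table maps each UFI component to its flash offset and
 * maximum size, and each component is written in 32KB chunks. All chunks
 * but the last use a SAVE op, staging the data, while the final chunk
 * issues the FLASH op that appears to commit the staged image.
 */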

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32 * 1024)
				num_bytes = 32 * 1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
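
/* In lancer_fw_download() above, the final zero-length write_object at the
 * accumulated offset is what commits the downloaded image. The loop also
 * advances by data_written rather than chunk_size, since the firmware may
 * accept a short write.
 */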

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32 * 1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
				(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
						num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
			mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
			mem->dma);
}
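
/* be_ctrl_init() below over-allocates the mailbox by 16 bytes and then
 * PTR_ALIGNs both the virtual and bus addresses; the hardware evidently
 * requires the MCC mailbox to sit on a 16-byte boundary, and the
 * over-allocation guarantees an aligned region inside the buffer.
 */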

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
3139
2243e2e9 3140static int be_get_config(struct be_adapter *adapter)
6b7c5b94 3141{
6b7c5b94 3142 int status;
2243e2e9 3143 u8 mac[ETH_ALEN];
6b7c5b94 3144
2243e2e9 3145 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
6b7c5b94
SP
3146 if (status)
3147 return status;
3148
3abcdeda
SP
3149 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3150 &adapter->function_mode, &adapter->function_caps);
43a04fdc
SP
3151 if (status)
3152 return status;
3153
2243e2e9 3154 memset(mac, 0, ETH_ALEN);
ba343c77 3155
12f4d0a8
ME
3156 /* A default permanent address is given to each VF for Lancer*/
3157 if (be_physfn(adapter) || lancer_chip(adapter)) {
ba343c77 3158 status = be_cmd_mac_addr_query(adapter, mac,
2243e2e9 3159 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
ca9e4988 3160
ba343c77
SB
3161 if (status)
3162 return status;
ca9e4988 3163
ba343c77
SB
3164 if (!is_valid_ether_addr(mac))
3165 return -EADDRNOTAVAIL;
3166
3167 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3168 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3169 }
6b7c5b94 3170
3486be29 3171 if (adapter->function_mode & 0x400)
82903e4b
AK
3172 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3173 else
3174 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3175
9e1453c5
AK
3176 status = be_cmd_get_cntl_attributes(adapter);
3177 if (status)
3178 return status;
3179
3c8def97
SP
3180 if ((num_vfs && adapter->sriov_enabled) ||
3181 (adapter->function_mode & 0x400) ||
3182 lancer_chip(adapter) || !be_physfn(adapter)) {
3183 adapter->num_tx_qs = 1;
3184 netif_set_real_num_tx_queues(adapter->netdev,
3185 adapter->num_tx_qs);
3186 } else {
3187 adapter->num_tx_qs = MAX_TX_QS;
3188 }
3189
2243e2e9 3190 return 0;
6b7c5b94
SP
3191}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
				SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}
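
/* lancer_wait_ready() below polls SLIPORT_STATUS every 20ms for up to
 * SLIPORT_READY_TIMEOUT iterations, i.e. roughly 10 seconds (500 * 20ms),
 * despite the macro name suggesting a millisecond value.
 */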

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg) {
			status = -ENOMEM;
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev,
				"Adapter in non-recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		u16 vf, lnk_speed;

		if (!lancer_chip(adapter)) {
			status = be_vf_eth_addr_config(adapter);
			if (status)
				goto unreg_netdev;
		}

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &mac_speed,
						&lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3432
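/*
 * Legacy PM suspend: quiesce the worker, arm wake-on-lan if configured,
 * close and tear down the interface, then hand the device to PCI PM.
 */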
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

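/*
 * Legacy PM resume: re-enable the device, re-init fw command support and
 * rebuild the queues torn down in be_suspend() before reopening the netdev.
 */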
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR (function-level reset) stops BE from DMAing any data; it is issued
 * below via be_cmd_reset_function() before the device is disabled.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

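/*
 * PCI error (EEH/AER) recovery: the PCI core calls error_detected() when a
 * channel failure is reported, slot_reset() after the slot has been reset,
 * and resume() once traffic may restart.
 */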
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

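/* slot_reset: bring the function back up and verify fw is ready via POST */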
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

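/* resume: rebuild fw command support and the queues, then restart traffic */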
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);
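
/*
 * Usage note (illustrative addition, not part of the original source):
 * assuming DRV_NAME is "be2net", the rx_frag_size and num_vfs module
 * parameters can be set at load time, e.g.:
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * An invalid rx_frag_size is coerced to 2048 by be_init_module() above;
 * num_vfs only takes effect on physical functions with SR-IOV support.
 */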