/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

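/* Enable or disable the host interrupt line by toggling the HOSTINTR
 * bit of the MEMBAR interrupt-control register in PCI config space.
 * Does nothing while an EEH error is pending or when the bit already
 * has the requested value.
 */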
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

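/* The *_notify() helpers below ring a doorbell register: the ring id
 * and the number of entries posted (or events/completions popped) are
 * packed into a single 32-bit doorbell write. The wmb() ensures the
 * queue entries are visible in memory before the doorbell write makes
 * them visible to the device.
 */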
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

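/* ndo_set_mac_address handler: replaces the interface's unicast MAC by
 * deleting the current pmac entry and adding a new one. For VFs the
 * firmware commands are skipped (the PF provisions the MAC) and only
 * netdev->dev_addr is updated.
 */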
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				 adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

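/* The populate_*_stats() helpers copy the firmware statistics snapshot
 * (v0 layout on BE2, v1 on BE3, per-port stats on Lancer) into the
 * generation-independent drv_stats block after converting it to CPU
 * endianness.
 */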
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

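/* Fold a 16-bit hardware counter that wraps at 65535 into a 32-bit
 * software accumulator: the low 16 bits track the hardware value and
 * the high 16 bits count the wraps seen so far.
 */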
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

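/* ndo_get_stats64 handler: sums the per-queue packet and byte counters
 * (read consistently via the u64_stats fetch/retry loops) and derives
 * the aggregate rx error counters from the firmware statistics
 * maintained by be_parse_stats().
 */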
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

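/* Fill the header WRB that precedes the data WRBs of a transmit:
 * LSO/LSO6 and TCP/UDP checksum-offload flags, VLAN insertion (with
 * the priority remapped when the OS-given priority is not in the
 * allowed bitmap), the total WRB count and the payload length.
 */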
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

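/* DMA-map the skb head and its page frags and fill one data WRB per
 * mapping, plus an optional dummy WRB to make the WRB count even.
 * Returns the number of payload bytes queued, or 0 after unwinding all
 * mappings if a dma_map call fails.
 */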
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

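/* ndo_set_rx_mode handler: switches the interface between promiscuous,
 * all-multicast and filtered-multicast rx filtering, and reprograms the
 * vlan table when leaving promiscuous mode.
 */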
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

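/* Adaptive interrupt coalescing: once a second, derive the rx
 * packets-per-second rate from the queue counters and scale the event
 * queue delay (EQD) accordingly, clamped to [min_eqd, max_eqd].
 */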
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}

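/* Look up the page_info for a posted rx fragment and, if this fragment
 * is the last user of its (possibly compound) page, unmap the page for
 * DMA. Also decrements the count of posted buffers in the rx queue.
 */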
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

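/* Return the next valid rx completion for this queue, parsed into
 * rxo->rxcp, or NULL if none is pending. The rmb() after the valid-bit
 * check ensures the rest of the completion is not read ahead of it.
 */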
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

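/* Unmap the WRBs of a completed transmit (the header WRB first, then
 * each data fragment up to last_index), free the skb and return the
 * number of WRBs processed so the caller can credit txq->used.
 */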
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

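/* Drain all pending entries from an event queue, notify the adapter of
 * how many were popped (rearming on spurious interrupts as well) and
 * kick NAPI processing when there was at least one event.
 */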
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

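/* Drain tx completions while shutting a tx queue down: poll for up to
 * 200ms, then forcibly unmap and free any posted skbs whose
 * completions will never arrive.
 */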
static void be_tx_compl_clean(struct be_adapter *adapter,
		struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

1580/* Must be called only after TX qs are created as MCC shares TX EQ */
1581static int be_mcc_queues_create(struct be_adapter *adapter)
1582{
1583 struct be_queue_info *q, *cq;
5fb379ee
SP
1584
1585 /* Alloc MCC compl queue */
8788fdc2 1586 cq = &adapter->mcc_obj.cq;
5fb379ee 1587 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
efd2e40a 1588 sizeof(struct be_mcc_compl)))
5fb379ee
SP
1589 goto err;
1590
1591 /* Ask BE to create MCC compl queue; share TX's eq */
8788fdc2 1592 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
5fb379ee
SP
1593 goto mcc_cq_free;
1594
1595 /* Alloc MCC queue */
8788fdc2 1596 q = &adapter->mcc_obj.q;
5fb379ee
SP
1597 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1598 goto mcc_cq_destroy;
1599
1600 /* Ask BE to create MCC queue */
8788fdc2 1601 if (be_cmd_mccq_create(adapter, q, cq))
5fb379ee
SP
1602 goto mcc_q_free;
1603
1604 return 0;
1605
1606mcc_q_free:
1607 be_queue_free(adapter, q);
1608mcc_cq_destroy:
8788fdc2 1609 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
5fb379ee
SP
1610mcc_cq_free:
1611 be_queue_free(adapter, cq);
1612err:
1613 return -1;
1614}
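
/*
 * Editorial note, not part of the driver: the label ladder above is the
 * standard kernel unwind idiom -- each failure point jumps to the label
 * that releases only what was already acquired, in reverse order of
 * acquisition, so the function has a single cleanup path and nothing is
 * freed twice.
 */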

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && be_physfn(adapter) &&
		!be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
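
/*
 * Editorial note, not part of the driver: the tail of be_poll_rx()
 * follows the standard NAPI contract -- when fewer than 'budget'
 * completions were found the poll is done, so napi_complete() is called
 * and the CQ is notified with rearm set to true; when the budget was
 * exhausted the CQ is acked without re-arming and the NAPI core will
 * invoke the poll handler again.
 */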

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}
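
/*
 * Editorial note, not part of the driver: a minimal, self-contained
 * sketch of the masked bit-walk used above.  The names (decode_bits,
 * desc) are hypothetical; only the shift-and-test idiom is taken from
 * be_detect_dump_ue() itself.
 */
#if 0
static void decode_bits(u32 status, u32 mask, const char * const desc[])
{
	u32 i;

	status &= ~mask;	/* masked bits are "do not report" */
	for (i = 0; status; status >>= 1, i++)
		if (status & 1)
			pr_err("bit %u (%s) set\n", i, desc[i]);
}
#endif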

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
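
/*
 * Editorial note, not part of the driver: in kernels of this vintage
 * pci_enable_msix() returns 0 on success and, when the request cannot
 * be met, a positive count of vectors that could be allocated.  The
 * code above leans on that contract: it retries once with the
 * advertised count, provided it still covers BE_MIN_MSIX_VECTORS
 * (one Rx + one Tx).
 */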

static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}
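
/*
 * Editorial note, not part of the driver: queue 0 is the default
 * non-RSS queue, which is why rxq creation above enables RSS only for
 * i > 0 and why be_cmd_rss_config() is handed num_rx_qs - 1 rss_ids.
 * The rsstable built here is the indirection table the hardware
 * consults to map a packet's RSS hash onto one of those queues.
 */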

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}
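
/*
 * Editorial note, not part of the driver: incrementing only mac[5]
 * wraps silently after 256 VFs (0xff back to 0x00) without carrying
 * into mac[4].  Since num_vfs is capped at the device's SR-IOV
 * TotalVFs this is harmless here; a carry-propagating variant would
 * look like the hypothetical sketch below.
 */
#if 0
static void mac_addr_inc(u8 mac[ETH_ALEN])
{
	int i;

	/* add 1 to the 48-bit address, propagating any carry upward */
	for (i = ETH_ALEN - 1; i >= 0; i--)
		if (++mac[i] != 0)
			break;
}
#endif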

static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}

	for (vf = 0; vf < num_vfs; vf++)
		if (adapter->vf_cfg[vf].vf_if_handle)
			be_cmd_if_destroy(adapter,
				adapter->vf_cfg[vf].vf_if_handle, vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	adapter->be3_native = false;
	adapter->promiscuous = false;

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
	}

	if (!lancer_chip(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
						vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status;
	u8 mac[ETH_ALEN];

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /* permanent */, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	/* For BEx, the VF's permanent mac queried from card is incorrect.
	 * Query the mac configured by the PF using if_handle
	 */
	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		goto err;
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		if (status)
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
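
/*
 * Editorial note, not part of the driver: the download loop advances by
 * data_written rather than by the requested chunk_size, so a short
 * write reported by the firmware is absorbed naturally; the final
 * zero-length lancer_cmd_write_object() call at the accumulated offset
 * is what commits the staged image.
 */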

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
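
/*
 * Editorial note, not part of the driver: the mailbox is over-allocated
 * by 16 bytes above so that PTR_ALIGN() can round both the CPU virtual
 * address and the DMA address up to the 16-byte boundary the hardware
 * expects, without needing a separate aligned DMA allocator.
 */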

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}
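
/*
 * Editorial note, not part of the driver: 500 iterations of msleep(20)
 * give the SLI port roughly ten seconds to assert ready; comparing the
 * loop counter against SLIPORT_READY_TIMEOUT afterwards is the usual
 * idiom for telling "broke out on success" apart from "timed out".
 */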

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_sriov_enable(adapter);
	if (status)
		goto free_netdev;

	status = be_ctrl_init(adapter);
	if (status)
		goto disable_sriov;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	/* The INTR bit may be set in the card when probed by a kdump kernel
	 * after a crash.
	 */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
disable_sriov:
	be_sriov_disable(adapter);
free_netdev:
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
3422
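/* PM suspend: cancel deferred work, arm wake-on-LAN if the user enabled
 * it, quiesce the interface and release queue/MSI-X resources before
 * letting the PCI core cut power. */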
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

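/* PM resume: the inverse of be_suspend() -- restore PCI state, re-enable
 * MSI-X, re-sync with firmware, then rebuild the rings via be_setup(). */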
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

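/* EEH (PCI error recovery) callbacks. The PCI core drives this state
 * machine when the platform reports a slot error:
 * error_detected -> slot_reset -> resume. */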
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

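/* Slot reset: the PCI core has reset the link; re-enable the device and
 * confirm via POST that the firmware is ready again. */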
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

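/* Final recovery stage: reinitialize the adapter and reattach the
 * interface; on any failure the device is left detached. */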
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

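/* Module entry point: sanity-check the rx_frag_size parameter (only
 * 2048, 4096 or 8192 byte fragments are accepted) before registering
 * the PCI driver. */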
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);