/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

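/* Both parameters are read-only (S_IRUGO), so they must be given at load
 * time.  A hypothetical example invocation (values chosen for illustration):
 *
 *	modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size selects the RX buffer fragment size and num_vfs the number
 * of SR-IOV virtual functions the PF creates at probe time.
 */
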
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

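/* The helpers below ring the adapter's doorbell registers.  Each 32-bit
 * doorbell word encodes the queue id in its low bits and the number of
 * entries posted or popped (plus rearm/clear flags for EQs and CQs) in the
 * higher bits.  be_rxq_notify() and be_txq_notify() issue a wmb() before
 * the iowrite32() so the device cannot observe the doorbell before the
 * ring entries it advertises are visible in memory.
 */
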
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

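/* Note on the ordering above: the new pmac entry is added before the old
 * one is deleted, so the interface always holds at least one valid unicast
 * filter and no frames are dropped during the switch-over.
 */
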
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

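/* Worked example of the wrap handling above: if the accumulator holds
 * 0x0001FFF0 (low 16 bits = 0xFFF0) and the HW counter now reads 0x0005,
 * the 16-bit counter must have wrapped, so the new value becomes
 * hi(0x0001FFF0) + 0x0005 + 65536 = 0x00020005 rather than 0x00010005.
 */
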
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

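/* The u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() loops above
 * implement a seqcount read: each per-queue counter pair is re-read until
 * no writer updated it in between, giving a consistent 64-bit snapshot on
 * 32-bit hosts without taking a lock in the hot TX/RX paths.
 */
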
void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

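/* Example of the count above: an skb with a linear header and two page
 * frags needs 1 (linear) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is already
 * even, so no dummy is added.  A one-frag skb would total 3, so a dummy
 * WRB is appended on non-Lancer chips to keep the count even.
 */
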
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

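/* Example of the priority remap above: a tag of 0x4005 carries PCP 2 in
 * bits 15:13 and VID 5 in the low 12 bits.  If bit 2 of
 * adapter->vlan_prio_bmap is clear, the PCP field is cleared and replaced
 * with adapter->recommended_prio while the VID is preserved.
 */
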
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	status = be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

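/* Worked example of the adaptive moderation above: at 440,000 pkts/s the
 * raw delay is 440000 / 110000 = 4, scaled by << 3 to 32, then clamped to
 * [min_eqd, max_eqd].  Any result below 10 is forced to 0, so (assuming
 * min_eqd does not clamp it higher first) rates under ~220K pkts/s run
 * with the interrupt delay disabled.
 */
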
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

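/* Completion ring convention used above: the hardware sets a valid bit in
 * each entry it writes, and the driver clears that bit after parsing so
 * the slot reads as empty on the next wrap of the ring.  The rmb() after
 * the valid-bit check keeps the CPU from reading the rest of the entry
 * before the bit itself.
 */
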
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

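/* Sizing note for the loop above: each big page yields
 * big_page_size / rx_frag_size receive fragments.  With the default
 * rx_frag_size of 2048 and, for example, a 16K big_page_size, one
 * allocation backs 8 posted RX descriptors, each holding a reference to
 * the same page via get_page(); the last user carries the unmap address.
 */
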
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

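/* Only a BE3-native PF that is not in SR-IOV or multi-channel mode gets
 * multiple TX queues; every other configuration uses a single TXQ.
 */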
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
	    be_is_mc(adapter) ||
	    lancer_chip(adapter) || !be_physfn(adapter) ||
	    adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
			   sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
				   sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
				   sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !adapter->sriov_enabled && be_physfn(adapter) &&
	    !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			 "No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				 msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			 "Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

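/* Non-destructively check whether the next entry in the EQ is valid; the
 * INTx handler below uses this on Lancer in place of the CEV_ISR0 read
 * done on BE2/BE3.
 */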
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

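/* Legacy INTx interrupt handler; returns IRQ_NONE when no event is pending
 * so that a shared interrupt line is properly disowned.
 */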
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

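/* NAPI poll handler for an RX queue: process up to @budget completions,
 * refill the RX ring when it runs low, and re-arm the CQ once all work
 * has been consumed.
 */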
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
			    atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}

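/* Detect an unrecoverable error (UE): on Lancer via the SLIPORT status
 * registers, on BE2/BE3 via the unmasked bits of the PCI UE status words;
 * log every offending bit by name.
 */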
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (adapter->eeh_err || adapter->ue_detected)
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
	    sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev,
			"Unrecoverable error in the card\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
					"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}

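/* Periodic (1s) housekeeping: check for unrecoverable errors, kick off a
 * stats request, adapt the RX EQ delay, and replenish any starved RX rings.
 */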
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

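/* Request one MSI-X vector per desired RX queue plus one for TX/MCC. If
 * pci_enable_msix() reports that only a smaller count is available, retry
 * with that count; on any other failure, fall back silently to INTx
 * (num_msix_vec stays 0).
 */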
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

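/* Enable SR-IOV when the num_vfs module parameter is set on a PF; the
 * request is clamped to the TotalVFs value advertised in the SR-IOV
 * capability, and per-VF config state is allocated on success.
 */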
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

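/* Create the RX rings in hardware; queue 0 is the default (non-RSS) queue
 * and the remaining queues are placed in the RSS indirection table.
 */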
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	int status, i;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

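/* Program (or clear) the magic-packet Wake-on-LAN filter in the firmware
 * and flag the PCI device as wake-capable for the D3hot/D3cold states.
 */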
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static void be_vf_clear(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	for (vf = 0; vf < num_vfs; vf++)
		be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
				vf + 1);
}

static int be_clear(struct be_adapter *adapter)
{
	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

static void be_vf_setup_init(struct be_adapter *adapter)
{
	int vf;

	for (vf = 0; vf < num_vfs; vf++) {
		adapter->vf_cfg[vf].vf_if_handle = -1;
		adapter->vf_cfg[vf].vf_pmac_id = -1;
	}
}

static int be_vf_setup(struct be_adapter *adapter)
{
	u32 cap_flags, en_flags, vf;
	u16 lnk_speed;
	int status;

	be_vf_setup_init(adapter);

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
		if (status)
			goto err;
	}

	if (!lancer_chip(adapter)) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
				vf + 1);
		if (status)
			goto err;
		adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
	}
	return 0;
err:
	return status;
}

static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	adapter->eq_next_idx = 0;
}

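/* Bring-up path (used at probe and resume): create TX/RX/MCC queues, create
 * the interface with its capability flags, program MAC/VLAN/flow-control
 * settings, and finally set up the VFs when SR-IOV is active.
 */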
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	u32 tx_fc, rx_fc;
	int status, i;
	u8 mac[ETH_ALEN];
	struct be_tx_obj *txo;

	be_setup_init(adapter);

	be_cmd_req_native_mode(adapter);

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto err;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
			true /*permanent */, 0);
	if (status)
		return status;
	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;

	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
		cap_flags |= BE_IF_FLAGS_RSS;
		en_flags |= BE_IF_FLAGS_RSS;
	}
	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, &adapter->if_handle,
			&adapter->pmac_id, 0);
	if (status != 0)
		goto err;

	for_all_tx_queues(adapter, txo, i) {
		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
		if (status)
			goto err;
	}

	/* For BEx, the VF's permanent mac queried from card is incorrect.
	 * Query the mac configured by the PF using if_handle
	 */
	if (!be_physfn(adapter) && !lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);

	status = be_vid_config(adapter, false, 0);
	if (status)
		goto err;

	be_set_rx_mode(adapter->netdev);

	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
	if (status)
		goto err;
	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);
		if (status)
			goto err;
	}

	pcie_set_readrq(adapter->pdev, 4096);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_setup(adapter);
		if (status)
			goto err;
	}

	return 0;
err:
	be_clear(adapter);
	return status;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
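/* Reflash the redboot section only when the CRC stored in flash differs
 * from the CRC of the corresponding image section in the new firmware file.
 */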
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static bool phy_flashing_required(struct be_adapter *adapter)
{
	int status = 0;
	struct be_phy_info phy_info;

	status = be_cmd_get_phy_info(adapter, &phy_info);
	if (status)
		return false;
	if ((phy_info.phy_type == TN_8022) &&
		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
		return true;
	}
	return false;
}

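/* Walk the per-generation flash layout table and write each applicable
 * image section to the flash ROM in 32KB chunks, using the SAVE opcode for
 * intermediate chunks and the FLASH opcode for the final one.
 */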
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[10] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
			if (!phy_flashing_required(adapter))
				continue;
		}
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;
			if (!total_bytes) {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_FLASH;
				else
					flash_op = FLASHROM_OPER_FLASH;
			} else {
				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
					flash_op = FLASHROM_OPER_PHY_SAVE;
				else
					flash_op = FLASHROM_OPER_SAVE;
			}
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				if ((status == ILLEGAL_IOCTL_REQ) &&
					(pflashcomp[i].optype ==
						IMG_TYPE_PHY_FW))
					break;
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

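/* The UFI file records its target ASIC generation as the first character of
 * the build string: '3' for BE3 (GEN3) images and '2' for BE2 (GEN2).
 */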
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

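/* Lancer firmware download: stream the image to the "/prg" object in 32KB
 * chunks via write_object commands, then commit it with a final zero-length
 * write to the same offset.
 */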
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
					&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
	    (get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
		   (get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
				BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
}

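/* BAR layout differs per chip: Lancer exposes only a doorbell region in
 * BAR 0; BE2/BE3 PFs map the CSR space from BAR 2 and the doorbells from
 * BAR 4 (BAR 0 on BE3 VFs).
 */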
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		db_reg = 4;
	} else {
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

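/* Map the BARs and allocate the DMA memory the control path needs: a
 * 16-byte-aligned mailbox for bootstrap commands and the rx_filter command
 * buffer; also initialize the mailbox and MCC locks.
 */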
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
					&rx_filter->dma, GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(rx_filter->va, 0, rx_filter->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

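/* Size the stats DMA buffer for the command variant this chip uses: the v0
 * request on BE2, the pport-stats request on Lancer, and v1 elsewhere.
 */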
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	if (adapter->function_mode & FLEX10_MODE)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

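/* Derive the ASIC generation from the PCI device ID; for the SLI devices
 * also validate the SLI_INTF register and record the SLI family.
 */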
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

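/* Poll the SLIPORT status register until the ready bit is set, giving up
 * after 500 iterations of 20ms (roughly 10 seconds).
 */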
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

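/* PCI probe: the bring-up order matters here; the mailbox (be_ctrl_init)
 * must exist before any fw command, POST/fw_init must complete before the
 * queues are created in be_setup(), and the netdev is registered last.
 */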
6b7c5b94
SP
3331static int __devinit be_probe(struct pci_dev *pdev,
3332 const struct pci_device_id *pdev_id)
3333{
3334 int status = 0;
3335 struct be_adapter *adapter;
3336 struct net_device *netdev;
6b7c5b94
SP
3337
3338 status = pci_enable_device(pdev);
3339 if (status)
3340 goto do_none;
3341
3342 status = pci_request_regions(pdev, DRV_NAME);
3343 if (status)
3344 goto disable_dev;
3345 pci_set_master(pdev);
3346
3c8def97 3347 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3348 if (netdev == NULL) {
3349 status = -ENOMEM;
3350 goto rel_reg;
3351 }
3352 adapter = netdev_priv(netdev);
3353 adapter->pdev = pdev;
3354 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3355
3356 status = be_dev_family_check(adapter);
63657b9c 3357 if (status)
fe6d2a38
SP
3358 goto free_netdev;
3359
6b7c5b94 3360 adapter->netdev = netdev;
2243e2e9 3361 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3362
2b7bcebf 3363 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3364 if (!status) {
3365 netdev->features |= NETIF_F_HIGHDMA;
3366 } else {
2b7bcebf 3367 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3368 if (status) {
3369 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3370 goto free_netdev;
3371 }
3372 }
3373
f9449ab7
SP
3374 status = be_sriov_enable(adapter);
3375 if (status)
3376 goto free_netdev;
ba343c77 3377
6b7c5b94
SP
3378 status = be_ctrl_init(adapter);
3379 if (status)
f9449ab7 3380 goto disable_sriov;
6b7c5b94 3381
37eed1cb
PR
3382 if (lancer_chip(adapter)) {
3383 status = lancer_test_and_set_rdy_state(adapter);
3384 if (status) {
3385 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
48f5a191 3386 goto ctrl_clean;
37eed1cb
PR
3387 }
3388 }
3389
2243e2e9 3390 /* sync up with fw's ready state */
ba343c77
SB
3391 if (be_physfn(adapter)) {
3392 status = be_cmd_POST(adapter);
3393 if (status)
3394 goto ctrl_clean;
ba343c77 3395 }
6b7c5b94 3396
2243e2e9
SP
3397 /* tell fw we're ready to fire cmds */
3398 status = be_cmd_fw_init(adapter);
6b7c5b94 3399 if (status)
2243e2e9
SP
3400 goto ctrl_clean;
3401
a4b4dfab
AK
3402 status = be_cmd_reset_function(adapter);
3403 if (status)
3404 goto ctrl_clean;
556ae191 3405
2243e2e9
SP
3406 status = be_stats_init(adapter);
3407 if (status)
3408 goto ctrl_clean;
3409
3410 status = be_get_config(adapter);
6b7c5b94
SP
3411 if (status)
3412 goto stats_clean;
6b7c5b94 3413
b9ab82c7
SP
3414 /* The INTR bit may be set in the card when probed by a kdump kernel
3415 * after a crash.
3416 */
3417 if (!lancer_chip(adapter))
3418 be_intr_set(adapter, false);
3419
3abcdeda
SP
3420 be_msix_enable(adapter);
3421
6b7c5b94 3422 INIT_DELAYED_WORK(&adapter->work, be_worker);
a54769f5 3423 adapter->rx_fc = adapter->tx_fc = true;
6b7c5b94 3424
5fb379ee
SP
3425 status = be_setup(adapter);
3426 if (status)
3abcdeda 3427 goto msix_disable;
2243e2e9 3428
3abcdeda 3429 be_netdev_init(netdev);
6b7c5b94
SP
3430 status = register_netdev(netdev);
3431 if (status != 0)
5fb379ee 3432 goto unsetup;
6b7c5b94 3433
c4ca2374 3434 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
34b1ef04 3435
f203af70 3436 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3437 return 0;
3438
5fb379ee
SP
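/* Error unwind: each label below releases the resources acquired
 * before the failing step, in reverse order of acquisition.
 */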
3439unsetup:
3440 be_clear(adapter);
3abcdeda
SP
3441msix_disable:
3442 be_msix_disable(adapter);
6b7c5b94
SP
3443stats_clean:
3444 be_stats_cleanup(adapter);
3445ctrl_clean:
3446 be_ctrl_cleanup(adapter);
f9449ab7 3447disable_sriov:
ba343c77 3448 be_sriov_disable(adapter);
f9449ab7 3449free_netdev:
fe6d2a38 3450 free_netdev(netdev);
8d56ff11 3451 pci_set_drvdata(pdev, NULL);
6b7c5b94
SP
3452rel_reg:
3453 pci_release_regions(pdev);
3454disable_dev:
3455 pci_disable_device(pdev);
3456do_none:
c4ca2374 3457 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
6b7c5b94
SP
3458 return status;
3459}
3460
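/* Suspend: stop the worker, arm wake-on-LAN if configured, detach and
 * close the netdev under rtnl, tear down rings via be_clear(), then
 * disable MSI-X and put the device into the requested PCI sleep state.
 */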
3461static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3462{
3463 struct be_adapter *adapter = pci_get_drvdata(pdev);
3464 struct net_device *netdev = adapter->netdev;
3465
a4ca055f 3466 cancel_delayed_work_sync(&adapter->work);
71d8d1b5
AK
3467 if (adapter->wol)
3468 be_setup_wol(adapter, true);
3469
6b7c5b94
SP
3470 netif_device_detach(netdev);
3471 if (netif_running(netdev)) {
3472 rtnl_lock();
3473 be_close(netdev);
3474 rtnl_unlock();
3475 }
9b0365f1 3476 be_clear(adapter);
6b7c5b94 3477
a4ca055f 3478 be_msix_disable(adapter);
6b7c5b94
SP
3479 pci_save_state(pdev);
3480 pci_disable_device(pdev);
3481 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3482 return 0;
3483}
3484
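/* Resume mirrors suspend: re-enable the device, restore PCI state,
 * re-enable MSI-X, re-init the firmware command path, rebuild rings
 * with be_setup(), reopen the netdev and restart the worker.
 */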
3485static int be_resume(struct pci_dev *pdev)
3486{
3487 int status = 0;
3488 struct be_adapter *adapter = pci_get_drvdata(pdev);
3489 struct net_device *netdev = adapter->netdev;
3490
3491 netif_device_detach(netdev);
3492
3493 status = pci_enable_device(pdev);
3494 if (status)
3495 return status;
3496
3497 pci_set_power_state(pdev, PCI_D0);
3498 pci_restore_state(pdev);
3499
a4ca055f 3500 be_msix_enable(adapter);
2243e2e9
SP
3501 /* tell fw we're ready to fire cmds */
3502 status = be_cmd_fw_init(adapter);
3503 if (status)
3504 return status;
3505
9b0365f1 3506 be_setup(adapter);
6b7c5b94
SP
3507 if (netif_running(netdev)) {
3508 rtnl_lock();
3509 be_open(netdev);
3510 rtnl_unlock();
3511 }
3512 netif_device_attach(netdev);
71d8d1b5
AK
3513
3514 if (adapter->wol)
3515 be_setup_wol(adapter, false);
a4ca055f
AK
3516
3517 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
6b7c5b94
SP
3518 return 0;
3519}
3520
82456b03
SP
3521/*
3522 * A function-level reset (FLR) will stop BE from DMAing any data.
3523 */
3524static void be_shutdown(struct pci_dev *pdev)
3525{
3526 struct be_adapter *adapter = pci_get_drvdata(pdev);
82456b03 3527
2d5d4154
AK
3528 if (!adapter)
3529 return;
82456b03 3530
0f4a6828 3531 cancel_delayed_work_sync(&adapter->work);
a4ca055f 3532
2d5d4154 3533 netif_device_detach(adapter->netdev);
82456b03 3534
82456b03
SP
3535 if (adapter->wol)
3536 be_setup_wol(adapter, true);
3537
57841869
AK
3538 be_cmd_reset_function(adapter);
3539
82456b03 3540 pci_disable_device(pdev);
82456b03
SP
3541}
3542
cf588477
SP
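/* PCI EEH (Extended Error Handling) callbacks. On a permanent channel
 * failure the device is disconnected; otherwise the EEH core calls
 * slot_reset (be_eeh_reset) and, if that succeeds, resume
 * (be_eeh_resume) to rebuild the interface.
 */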
3543static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3544 pci_channel_state_t state)
3545{
3546 struct be_adapter *adapter = pci_get_drvdata(pdev);
3547 struct net_device *netdev = adapter->netdev;
3548
3549 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3550
3551 adapter->eeh_err = true;
3552
3553 netif_device_detach(netdev);
3554
3555 if (netif_running(netdev)) {
3556 rtnl_lock();
3557 be_close(netdev);
3558 rtnl_unlock();
3559 }
3560 be_clear(adapter);
3561
3562 if (state == pci_channel_io_perm_failure)
3563 return PCI_ERS_RESULT_DISCONNECT;
3564
3565 pci_disable_device(pdev);
3566
3567 return PCI_ERS_RESULT_NEED_RESET;
3568}
3569
3570static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3571{
3572 struct be_adapter *adapter = pci_get_drvdata(pdev);
3573 int status;
3574
3575 dev_info(&adapter->pdev->dev, "EEH reset\n");
3576 adapter->eeh_err = false;
6589ade0
SP
3577 adapter->ue_detected = false;
3578 adapter->fw_timeout = false;
cf588477
SP
3579
3580 status = pci_enable_device(pdev);
3581 if (status)
3582 return PCI_ERS_RESULT_DISCONNECT;
3583
3584 pci_set_master(pdev);
3585 pci_set_power_state(pdev, PCI_D0);
3586 pci_restore_state(pdev);
3587
3588 /* Check if card is ok and fw is ready */
3589 status = be_cmd_POST(adapter);
3590 if (status)
3591 return PCI_ERS_RESULT_DISCONNECT;
3592
3593 return PCI_ERS_RESULT_RECOVERED;
3594}
3595
3596static void be_eeh_resume(struct pci_dev *pdev)
3597{
3598 int status = 0;
3599 struct be_adapter *adapter = pci_get_drvdata(pdev);
3600 struct net_device *netdev = adapter->netdev;
3601
3602 dev_info(&adapter->pdev->dev, "EEH resume\n");
3603
3604 pci_save_state(pdev);
3605
3606 /* tell fw we're ready to fire cmds */
3607 status = be_cmd_fw_init(adapter);
3608 if (status)
3609 goto err;
3610
3611 status = be_setup(adapter);
3612 if (status)
3613 goto err;
3614
3615 if (netif_running(netdev)) {
3616 status = be_open(netdev);
3617 if (status)
3618 goto err;
3619 }
3620 netif_device_attach(netdev);
3621 return;
3622err:
3623 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
cf588477
SP
3624}
3625
3626static struct pci_error_handlers be_eeh_handlers = {
3627 .error_detected = be_eeh_err_detected,
3628 .slot_reset = be_eeh_reset,
3629 .resume = be_eeh_resume,
3630};
3631
6b7c5b94
SP
3632static struct pci_driver be_driver = {
3633 .name = DRV_NAME,
3634 .id_table = be_dev_ids,
3635 .probe = be_probe,
3636 .remove = be_remove,
3637 .suspend = be_suspend,
cf588477 3638 .resume = be_resume,
82456b03 3639 .shutdown = be_shutdown,
cf588477 3640 .err_handler = &be_eeh_handlers
6b7c5b94
SP
3641};
3642
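/* Module init: rx_frag_size (the size of each receive buffer fragment)
 * must be one of 2048/4096/8192; anything else is clamped to the 2048
 * default before the PCI driver is registered.
 */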
3643static int __init be_init_module(void)
3644{
8e95a202
JP
3645 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3646 rx_frag_size != 2048) {
6b7c5b94
SP
3647 printk(KERN_WARNING DRV_NAME
3648 " : Module param rx_frag_size must be 2048/4096/8192."
3649 " Using 2048\n");
3650 rx_frag_size = 2048;
3651 }
6b7c5b94
SP
3652
3653 return pci_register_driver(&be_driver);
3654}
3655module_init(be_init_module);
3656
3657static void __exit be_exit_module(void)
3658{
3659 pci_unregister_driver(&be_driver);
3660}
3661module_exit(be_exit_module);