/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

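/* Allocate/free the DMA-coherent backing memory for a BE ring (EQ, CQ,
 * RXQ, TXQ or MCCQ): len entries of entry_size bytes each, tracked in
 * struct be_queue_info together with the ring's head/tail state.
 */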
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

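/* Toggle the global host-interrupt enable bit in the PCI config BAR.
 * The MMIO write is skipped when the bit already has the requested value
 * or when an EEH error has been detected on the device.
 */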
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

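/* Doorbell helpers: each queue type has a doorbell register in the db BAR.
 * The ring id and a count (buffers posted, or events/completions popped)
 * are packed into a single 32-bit write; the wmb() orders the descriptor
 * stores in memory before the doorbell that makes them visible to the
 * adapter.
 */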
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

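/* The firmware reports hardware stats in chip-specific layouts: v0 for
 * BE2, v1 for BE3 and a pport format for Lancer. The populate_*() helpers
 * below copy each layout into the common struct be_drv_stats.
 */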
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
		pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			       pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
					     pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			       pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			       pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			       pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
					      pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			       pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
						 pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
					    pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			       pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	unsigned long pkts = 0, bytes = 0, mcast = 0, drops = 0;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		pkts += rx_stats(rxo)->rx_pkts;
		bytes += rx_stats(rxo)->rx_bytes;
		mcast += rx_stats(rxo)->rx_mcast_pkts;
		drops += rx_stats(rxo)->rx_dropped;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx =
					be_erx_stats_from_cmd(adapter);
				drops += erx->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx =
				be_erx_stats_from_cmd(adapter);
			drops += erx->rx_drops_no_fragments[rxo->q.id];
		}
	}
	dev_stats->rx_packets = pkts;
	dev_stats->rx_bytes = bytes;
	dev_stats->multicast = mcast;
	dev_stats->rx_dropped = drops;

	pkts = bytes = 0;
	for_all_tx_queues(adapter, txo, i) {
		pkts += tx_stats(txo)->be_tx_pkts;
		bytes += tx_stats(txo)->be_tx_bytes;
	}
	dev_stats->tx_packets = pkts;
	dev_stats->tx_bytes = bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

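/* Convert a byte count measured over 'ticks' jiffies into Mbits/sec.
 * For example, 250,000,000 bytes over 2*HZ ticks gives 125,000,000 B/s,
 * which is 1,000,000,000 bits/s after the <<= 3, i.e. 1000 Mbps.
 */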
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}

static void be_tx_rate_update(struct be_tx_obj *txo)
{
	struct be_tx_stats *stats = tx_stats(txo);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						 - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

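/* Build the header WRB that precedes the data WRBs of every transmit:
 * it carries the per-packet offload flags (LSO/checksum), the VLAN tag,
 * the total WRB count and the payload length for the adapter.
 */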
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

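/* DMA-map the skb head and its page frags and fill one WRB per mapping,
 * starting just after the (not yet written) header WRB. On a mapping
 * failure the queue head is rewound and every mapping made so far is
 * undone; 0 is returned so the caller can drop the skb.
 */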
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
		&adapter->mc_cmd_mem);
done:
	return;
}

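/* ndo_set_vf_* / ndo_get_vf_config handlers: the PF configures MAC, VLAN
 * and tx-rate on behalf of a VF via firmware commands. All of them reject
 * the request unless SR-IOV is enabled and the VF index is valid.
 */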
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
				adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

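/* Look up the page-info slot for a received fragment. RX buffers are
 * sub-page fragments of a "big page"; the DMA mapping covers the whole
 * page and is torn down only when its last fragment is consumed.
 */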
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rxo->stats.rx_dropped++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf))
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

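/* BE3-native hardware reports RX completions in the v1 layout, older
 * chips in v0; both parsers extract the same fields from the hardware
 * completion entry into the driver's struct be_rx_compl_info.
 */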
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

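/* Higher-order RX buffer pages are allocated as compound pages
 * (__GFP_COMP) so that the get_page()/put_page() refcounting done per
 * sub-page fragment operates on the buffer as a single unit.
 */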
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

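/* Reclaim one transmitted skb: walk its WRBs from the queue tail up to
 * last_index, unmapping each fragment (the head mapping only once), then
 * free the skb. Returns the number of WRBs, including the header and any
 * dummy WRB, consumed by this packet.
 */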
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

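/* Pop the next valid entry from an event queue; the rmb() ensures the
 * entry contents are read only after the valid (evt) flag is observed.
 */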
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

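/* Drain the TX queue at teardown: poll for up to 200ms for outstanding
 * completions, then forcibly reclaim any WRBs whose completions never
 * arrived so that all skbs and DMA mappings are released.
 */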
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

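/* Lancer appears not to expose the CEV_ISR bank read in be_intx() below,
 * so under INTx the handler instead peeks at the tail entry of each event
 * queue: a non-zero 'evt' word means that queue has at least one pending
 * event to service.
 */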
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

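/* NAPI poll for an RX queue: process up to 'budget' completions, refill
 * the ring once it drops below the watermark, and only re-arm the CQ
 * interrupt (via napi_complete) when a poll round consumes less than the
 * budget, i.e. the queue is fully drained.
 */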
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			adapter->drv_stats.be_tx_events++;
			txo->stats.be_tx_compl += tx_compl;
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	return 1;
}

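/* Decode the two Unrecoverable Error (UE) status CSRs read from PCI config
 * space below. Bits masked off in the corresponding *_MASK register are
 * ignored; each remaining set bit is reported by walking the word bit by
 * bit and indexing the ue_status_low_desc[]/ue_status_hi_desc[] name tables.
 */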
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

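/* Periodic (1s) housekeeping: check for unrecoverable errors, kick off a
 * stats request if none is in flight, update tx/rx rate samples and EQ
 * delays, and repost RX buffers on queues that previously starved. While
 * the interface is down, only MCC completions are reaped.
 */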
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_tx_queues(adapter, txo, i)
		be_tx_rate_update(txo);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

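/* With the old pci_enable_msix() API used below, a positive return value
 * means fewer vectors are available than requested and reports how many
 * can be granted; the allocation is then retried once with that smaller
 * count, as long as it still covers the Rx + Tx minimum.
 */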
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs, not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static void be_rx_queues_clear(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created)
			be_eq_clean(adapter, &rxo->rx_eq);
	}
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	be_rx_queues_clear(adapter);
	return 0;
}

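/* Create the actual RX rings in hardware. Queue 0 is the default
 * (non-RSS) queue; queues 1..n are created with RSS enabled and their
 * rss_ids are collected into the indirection table programmed via
 * be_cmd_rss_config().
 */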
static int be_rx_queues_setup(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i;
	u8 rsstable[MAX_RSS_QS];

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
			adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			return rc;
	}

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	status = be_rx_queues_setup(adapter);
	if (status)
		goto err;

	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

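/* Program or clear the magic-packet wake-up filter. Enabling arms the
 * filter with the netdev's current MAC and sets PCI wake for D3hot and
 * D3cold; disabling passes an all-zero MAC and clears both wake flags.
 */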
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
						BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	/* Allow all priorities by default. A GRP5 evt may modify this */
	adapter->vlan_prio_bmap = 0xff;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

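/* Lancer firmware download: the image is streamed to the "/prg" object in
 * 32KB chunks via lancer_cmd_write_object(), and a final zero-length write
 * at the end offset commits the image. The 'data_written' count returned
 * for each chunk advances the offset, so partial writes are handled.
 */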
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW image not properly aligned. "
			"Length must be 4-byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}

static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
		(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -1;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer */
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /* permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);

	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}

static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

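/* If the SLI port reports an error together with the reset-needed bit,
 * a port reset is requested through SLIPORT_CONTROL and the ready bit is
 * polled again. Any error or reset-needed state still present after the
 * retry is treated as fatal (-1).
 */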
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

6b7c5b94
SP
3316static int __devinit be_probe(struct pci_dev *pdev,
3317 const struct pci_device_id *pdev_id)
3318{
3319 int status = 0;
3320 struct be_adapter *adapter;
3321 struct net_device *netdev;
6b7c5b94
SP
3322
3323 status = pci_enable_device(pdev);
3324 if (status)
3325 goto do_none;
3326
3327 status = pci_request_regions(pdev, DRV_NAME);
3328 if (status)
3329 goto disable_dev;
3330 pci_set_master(pdev);
3331
3c8def97 3332 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
6b7c5b94
SP
3333 if (netdev == NULL) {
3334 status = -ENOMEM;
3335 goto rel_reg;
3336 }
3337 adapter = netdev_priv(netdev);
3338 adapter->pdev = pdev;
3339 pci_set_drvdata(pdev, adapter);
fe6d2a38
SP
3340
3341 status = be_dev_family_check(adapter);
63657b9c 3342 if (status)
fe6d2a38
SP
3343 goto free_netdev;
3344
6b7c5b94 3345 adapter->netdev = netdev;
2243e2e9 3346 SET_NETDEV_DEV(netdev, &pdev->dev);
6b7c5b94 3347
2b7bcebf 3348 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6b7c5b94
SP
3349 if (!status) {
3350 netdev->features |= NETIF_F_HIGHDMA;
3351 } else {
2b7bcebf 3352 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6b7c5b94
SP
3353 if (status) {
3354 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3355 goto free_netdev;
3356 }
3357 }
3358
ba343c77 3359 be_sriov_enable(adapter);
48f5a191
AK
3360 if (adapter->sriov_enabled) {
3361 adapter->vf_cfg = kcalloc(num_vfs,
3362 sizeof(struct be_vf_cfg), GFP_KERNEL);
3363
3364 if (!adapter->vf_cfg)
3365 goto free_netdev;
3366 }
ba343c77 3367
6b7c5b94
SP
3368 status = be_ctrl_init(adapter);
3369 if (status)
48f5a191 3370 goto free_vf_cfg;
6b7c5b94 3371
37eed1cb
PR
3372 if (lancer_chip(adapter)) {
3373 status = lancer_test_and_set_rdy_state(adapter);
3374 if (status) {
3375 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
48f5a191 3376 goto ctrl_clean;
37eed1cb
PR
3377 }
3378 }
3379
2243e2e9 3380 /* sync up with fw's ready state */
ba343c77
SB
3381 if (be_physfn(adapter)) {
3382 status = be_cmd_POST(adapter);
3383 if (status)
3384 goto ctrl_clean;
ba343c77 3385 }
6b7c5b94 3386
2243e2e9
SP
3387 /* tell fw we're ready to fire cmds */
3388 status = be_cmd_fw_init(adapter);
6b7c5b94 3389 if (status)
2243e2e9
SP
3390 goto ctrl_clean;
3391
a4b4dfab
AK
3392 status = be_cmd_reset_function(adapter);
3393 if (status)
3394 goto ctrl_clean;
556ae191 3395
2243e2e9
SP
3396 status = be_stats_init(adapter);
3397 if (status)
3398 goto ctrl_clean;
3399
3400 status = be_get_config(adapter);
6b7c5b94
SP
3401 if (status)
3402 goto stats_clean;
6b7c5b94 3403
b9ab82c7
SP
3404 /* The INTR bit may be set in the card when probed by a kdump kernel
3405 * after a crash.
3406 */
3407 if (!lancer_chip(adapter))
3408 be_intr_set(adapter, false);
3409
3abcdeda
SP
3410 be_msix_enable(adapter);
3411
6b7c5b94 3412 INIT_DELAYED_WORK(&adapter->work, be_worker);
6b7c5b94 3413
5fb379ee
SP
3414 status = be_setup(adapter);
3415 if (status)
3abcdeda 3416 goto msix_disable;
2243e2e9 3417
3abcdeda 3418 be_netdev_init(netdev);
6b7c5b94
SP
3419 status = register_netdev(netdev);
3420 if (status != 0)
5fb379ee 3421 goto unsetup;
63a76944 3422 netif_carrier_off(netdev);
6b7c5b94 3423
e6319365 3424 if (be_physfn(adapter) && adapter->sriov_enabled) {
d0381c42
AK
3425 u8 mac_speed;
3426 bool link_up;
3427 u16 vf, lnk_speed;
3428
12f4d0a8
ME
3429 if (!lancer_chip(adapter)) {
3430 status = be_vf_eth_addr_config(adapter);
3431 if (status)
3432 goto unreg_netdev;
3433 }
d0381c42
AK
3434
3435 for (vf = 0; vf < num_vfs; vf++) {
3436 status = be_cmd_link_status_query(adapter, &link_up,
3437 &mac_speed, &lnk_speed, vf + 1);
3438 if (!status)
3439 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3440 else
3441 goto unreg_netdev;
3442 }
e6319365
AK
3443 }
3444
	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

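	/* Error unwind: the labels below undo init steps in reverse order;
	 * each failure site jumps to the label that releases everything
	 * acquired so far and falls through to the labels beneath it.
	 */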
unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

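/* Legacy PCI PM suspend: stop the worker, optionally arm wake-on-LAN,
 * detach and close the interface, tear down rings via be_clear(), then
 * let the PCI core put the device into the requested low-power state.
 */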
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

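/* Resume mirrors be_suspend: power the device back up, re-init FW cmds
 * and MSI-X, rebuild rings via be_setup(), reopen the interface if it
 * was running, and restart the housekeeping worker.
 */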
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Don't proceed with a dead adapter: check be_setup's result
	 * instead of silently ignoring it.
	 */
	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR (function-level reset) will stop BE from DMAing any data; reset
 * the function at shutdown so no DMA is left in flight across a reboot.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

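/* PCIe error (EEH) recovery, driven by the PCI core in three steps:
 * error_detected() quiesces the device, slot_reset() re-initializes it
 * after the link is reset, and resume() restores traffic.
 */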
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers,
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
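
/* Illustrative usage (not part of this file): loading the driver with
 * its module parameters, assuming DRV_NAME is "be2net":
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * Out-of-range rx_frag_size values are coerced to 2048 by the check above.
 */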
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);