/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

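/* Both parameters are read-only after load (S_IRUGO). A hypothetical
 * load-time invocation, assuming the module is built as be2net:
 *
 *   # modprobe be2net rx_frag_size=4096 num_vfs=2
 *
 * rx_frag_size selects the size of the receive-buffer fragments posted
 * to the adapter; num_vfs asks the driver to enable that many PCI
 * virtual functions at probe time.
 */
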
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC",
	"RDMA",
	"RXF",
	"RXIPS",
	"RXULP0",
	"RXULP1",
	"RXULP2",
	"TIM",
	"TPOST",
	"TPRE",
	"TXIPS",
	"TXULP0",
	"TXULP1",
	"UC",
	"WDMA",
	"TXULP2",
	"HOST1",
	"P0_OB_LINK",
	"P1_OB_LINK",
	"HOST_GPIO",
	"MBOX",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

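/* The EQ and CQ doorbells below share a common layout: the low bits carry
 * the ring id, a rearm bit re-enables the queue's interrupt, and
 * num_popped returns consumed entries to the hardware so it can reuse
 * them. Writing with arm=false lets the driver keep polling a queue
 * without taking another interrupt until it explicitly rearms it.
 */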
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

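/* Changing the MAC is make-before-break: the new address is added as an
 * extra pmac entry first, and the old pmac_id is deleted only once the
 * add has succeeded, so the interface never loses its unicast filter.
 */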
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

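/* accumulate_16bit_val() folds a free-running 16-bit hardware counter
 * into a 32-bit software accumulator. A worked example of the wrap
 * handling: with *acc == 0x0001FFF0 and a new reading of 0x0005,
 * val < lo(*acc) signals a wrap, so the result is
 * 0x00010005 + 65536 = 0x00020005, preserving the events counted
 * across the 16-bit rollover.
 */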
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats->rx_pkts;
			bytes = rx_stats->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats->rx_mcast_pkts;
		stats->rx_dropped += rx_stats->rx_drops_no_skbs +
					rx_stats->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats->tx_pkts;
			bytes = tx_stats->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

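/* WRB accounting for one skb: one fragment WRB for the linear head (when
 * skb->len > skb->data_len), one per page fragment, plus the header WRB.
 * For example, an skb with a linear head and two page frags needs
 * 1 + 2 + 1 = 4 WRBs; on BE2/BE3 the per-request WRB count must be even,
 * so an odd total gets one extra dummy WRB (Lancer has no such
 * restriction).
 */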
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
							bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
							udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

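/* be_xmit() always returns NETDEV_TX_OK: on success the skb is recorded
 * in sent_skb_list and freed later from the TX completion path; if
 * make_tx_wrbs() fails (e.g. a DMA mapping error), the queue head is
 * rolled back and the skb is dropped here rather than requeued.
 */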
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (new_mtu < BE_MIN_MTU ||
	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	status = be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle,
				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

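/* Adaptive interrupt moderation: the EQ delay is derived from the
 * measured RX rate as eqd = (pps / 110000) << 3, clamped to the EQ's
 * [min_eqd, max_eqd] range and forced to 0 below 10. For example, at
 * roughly 1.1M pkts/s this computes (10 << 3) = 80 before clamping; the
 * value is written to hardware only when it changes.
 */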
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
			(delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
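/* Only the first BE_HDR_LEN bytes (enough for the protocol headers) are
 * copied into the skb's linear area; the rest of each rx_frag_size chunk
 * is attached as a page fragment. E.g. with the default 2048-byte
 * fragments, a 3000-byte frame arrives in two fragments: the headers are
 * memcpy'd, and the remainder of fragment 0 plus all of fragment 1
 * become page frags, avoiding a full-packet copy.
 */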
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
		struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
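/* A "big page" is a (possibly compound) page sized to hold a whole
 * number of rx_frag_size fragments; big_page_size is computed in
 * be_rx_queues_create(). With 4K pages and the default 2048-byte
 * fragment, each page yields two RX buffers. last_page_user marks the
 * final fragment carved from a page, so its consumption is what
 * triggers the DMA unmap in get_rx_page_info().
 */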
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

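/* Drain every pending entry from an event queue. If the interrupt fired
 * with no events queued (a spurious interrupt), the EQ is rearmed anyway
 * so the vector is not left dead; when there was real work, NAPI is
 * scheduled to do the completion processing.
 */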
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}

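/* Teardown-time TX drain: poll the TX CQ for up to ~200ms (200 rounds of
 * mdelay(1)) waiting for in-flight sends to complete, then warn about
 * and force-free any posted WRBs whose completions will never arrive so
 * their DMA mappings and skbs are not leaked.
 */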
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}

/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}

static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && be_physfn(adapter) &&
		!be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return rxcp->tcpf && !rxcp->err;
}
1859
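/* RX NAPI poll: consume up to 'budget' completions. Flush completions
 * (num_rcvd == 0), partial-DMA completions (Lancer B0) and pkts received
 * on the wrong port (BE in promiscuous mode) are dropped; error-free TCP
 * pkts go through GRO. The RX ring is refilled once it falls below the
 * watermark, and the CQ is re-armed only when the budget wasn't exhausted.
 */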
1860static int be_poll_rx(struct napi_struct *napi, int budget)
1861{
1862 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1863 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1864 struct be_adapter *adapter = rxo->adapter;
1865 struct be_queue_info *rx_cq = &rxo->cq;
1866 struct be_rx_compl_info *rxcp;
1867 u32 work_done;
1868
1869 rx_stats(rxo)->rx_polls++;
1870 for (work_done = 0; work_done < budget; work_done++) {
1871 rxcp = be_rx_compl_get(rxo);
1872 if (!rxcp)
1873 break;
1874
1875 /* Is it a flush compl that has no data? */
1876 if (unlikely(rxcp->num_rcvd == 0))
1877 goto loop_continue;
1878
1879 /* Discard a compl with partial DMA (Lancer B0) */
1880 if (unlikely(!rxcp->pkt_size)) {
1881 be_rx_compl_discard(adapter, rxo, rxcp);
1882 goto loop_continue;
1883 }
1884
1885 /* On BE, drop pkts that arrive due to imperfect filtering in
1886 * promiscuous mode on some SKUs
1887 */
1888 if (unlikely(rxcp->port != adapter->port_num &&
1889 !lancer_chip(adapter))) {
1890 be_rx_compl_discard(adapter, rxo, rxcp);
1891 goto loop_continue;
1892 }
1893
1894 if (do_gro(rxcp))
1895 be_rx_compl_process_gro(adapter, rxo, rxcp);
1896 else
1897 be_rx_compl_process(adapter, rxo, rxcp);
1898loop_continue:
1899 be_rx_stats_update(rxo, rxcp);
1900 }
1901
1902 be_cq_notify(adapter, rx_cq->id, false, work_done);
1903
1904 /* Refill the queue */
1905 if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1906 be_post_rx_frags(rxo, GFP_ATOMIC);
1907
1908 /* All consumed */
1909 if (work_done < budget) {
1910 napi_complete(napi);
1911 /* Arm CQ */
1912 be_cq_notify(adapter, rx_cq->id, true, 0);
1913 }
1914 return work_done;
1915}
1916
1917/* As TX and MCC share the same EQ, check for both TX and MCC completions.
1918 * For TX/MCC we don't honour the budget; consume everything.
1919 */
1920static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1921{
1922 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1923 struct be_adapter *adapter =
1924 container_of(tx_eq, struct be_adapter, tx_eq);
1925 struct be_tx_obj *txo;
1926 struct be_eth_tx_compl *txcp;
1927 int tx_compl, mcc_compl, status = 0;
1928 u8 i;
1929 u16 num_wrbs;
1930
1931 for_all_tx_queues(adapter, txo, i) {
1932 tx_compl = 0;
1933 num_wrbs = 0;
1934 while ((txcp = be_tx_compl_get(&txo->cq))) {
1935 num_wrbs += be_tx_compl_process(adapter, txo,
1936 AMAP_GET_BITS(struct amap_eth_tx_compl,
1937 wrb_index, txcp));
1938 tx_compl++;
1939 }
1940 if (tx_compl) {
1941 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1942
1943 atomic_sub(num_wrbs, &txo->q.used);
1944
1945 /* As Tx wrbs have been freed up, wake up netdev queue
1946 * if it was stopped due to lack of tx wrbs. */
1947 if (__netif_subqueue_stopped(adapter->netdev, i) &&
1948 atomic_read(&txo->q.used) < txo->q.len / 2) {
1949 netif_wake_subqueue(adapter->netdev, i);
1950 }
1951
1952 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1953 tx_stats(txo)->tx_compl += tx_compl;
1954 u64_stats_update_end(&tx_stats(txo)->sync_compl);
1955 }
1956 }
1957
1958 mcc_compl = be_process_mcc(adapter, &status);
1959
1960 if (mcc_compl) {
1961 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1962 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1963 }
1964
1965 napi_complete(napi);
1966
1967 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
1968 adapter->drv_stats.tx_events++;
1969 return 1;
1970}
1971
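/* Detect an unrecoverable error (UE). Lancer reports errors through the
 * SLIPORT_STATUS register; BE2/BE3 report them through the PCICFG UE
 * status registers, filtered by the corresponding mask registers. The
 * failing blocks are decoded via the ue_status_low/hi_desc tables.
 */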
1972void be_detect_dump_ue(struct be_adapter *adapter)
1973{
1974 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
1975 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
1976 u32 i;
1977
1978 if (adapter->eeh_err || adapter->ue_detected)
1979 return;
1980
1981 if (lancer_chip(adapter)) {
1982 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
1983 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
1984 sliport_err1 = ioread32(adapter->db +
1985 SLIPORT_ERROR1_OFFSET);
1986 sliport_err2 = ioread32(adapter->db +
1987 SLIPORT_ERROR2_OFFSET);
1988 }
1989 } else {
1990 pci_read_config_dword(adapter->pdev,
1991 PCICFG_UE_STATUS_LOW, &ue_lo);
1992 pci_read_config_dword(adapter->pdev,
1993 PCICFG_UE_STATUS_HIGH, &ue_hi);
1994 pci_read_config_dword(adapter->pdev,
1995 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
1996 pci_read_config_dword(adapter->pdev,
1997 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
1998
1999 ue_lo = (ue_lo & (~ue_lo_mask));
2000 ue_hi = (ue_hi & (~ue_hi_mask));
2001 }
2002
2003 if (ue_lo || ue_hi ||
2004 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2005 adapter->ue_detected = true;
2006 adapter->eeh_err = true;
2007 dev_err(&adapter->pdev->dev,
2008 "Unrecoverable error in the card\n");
2009 }
2010
2011 if (ue_lo) {
2012 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2013 if (ue_lo & 1)
2014 dev_err(&adapter->pdev->dev,
2015 "UE: %s bit set\n", ue_status_low_desc[i]);
2016 }
2017 }
2018 if (ue_hi) {
2019 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2020 if (ue_hi & 1)
2021 dev_err(&adapter->pdev->dev,
2022 "UE: %s bit set\n", ue_status_hi_desc[i]);
2023 }
2024 }
2025
2026 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2027 dev_err(&adapter->pdev->dev,
2028 "sliport status 0x%x\n", sliport_status);
2029 dev_err(&adapter->pdev->dev,
2030 "sliport error1 0x%x\n", sliport_err1);
2031 dev_err(&adapter->pdev->dev,
2032 "sliport error2 0x%x\n", sliport_err2);
2033 }
2034}
2035
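/* Worker, rescheduled every second: checks for UEs, reaps MCC completions
 * while the interface is down, re-issues the asynchronous stats command
 * once the previous one has completed and, for each RX queue, updates the
 * EQ delay and reposts buffers if the queue was flagged as starved.
 */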
2036static void be_worker(struct work_struct *work)
2037{
2038 struct be_adapter *adapter =
2039 container_of(work, struct be_adapter, work.work);
2040 struct be_rx_obj *rxo;
2041 int i;
2042
2043 be_detect_dump_ue(adapter);
2044
2045 /* When interrupts are not yet enabled, just reap any pending
2046 * MCC completions */
2047 if (!netif_running(adapter->netdev)) {
2048 int mcc_compl, status = 0;
2049
2050 mcc_compl = be_process_mcc(adapter, &status);
2051
2052 if (mcc_compl) {
2053 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2054 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
2055 }
2056
2057 goto reschedule;
2058 }
2059
2060 if (!adapter->stats_cmd_sent) {
2061 if (lancer_chip(adapter))
2062 lancer_cmd_get_pport_stats(adapter,
2063 &adapter->stats_cmd);
2064 else
2065 be_cmd_get_stats(adapter, &adapter->stats_cmd);
2066 }
2067
2068 for_all_rx_queues(adapter, rxo, i) {
2069 be_rx_eqd_update(adapter, rxo);
2070
2071 if (rxo->rx_post_starved) {
2072 rxo->rx_post_starved = false;
2073 be_post_rx_frags(rxo, GFP_KERNEL);
2074 }
2075 }
2076
2077reschedule:
2078 adapter->work_counter++;
2079 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2080}
2081
2082static void be_msix_disable(struct be_adapter *adapter)
2083{
2084 if (msix_enabled(adapter)) {
2085 pci_disable_msix(adapter->pdev);
2086 adapter->num_msix_vec = 0;
2087 }
2088}
2089
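/* Enable MSI-X with one vector per desired RX queue plus one shared
 * TX/MCC vector. With this kernel's API, a failed pci_enable_msix()
 * returns the number of vectors actually available; we retry with that
 * count as long as it covers the 1 RX + 1 TX minimum.
 */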
2090static void be_msix_enable(struct be_adapter *adapter)
2091{
2092#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
2093 int i, status, num_vec;
2094
2095 num_vec = be_num_rxqs_want(adapter) + 1;
2096
2097 for (i = 0; i < num_vec; i++)
2098 adapter->msix_entries[i].entry = i;
2099
2100 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2101 if (status == 0) {
2102 goto done;
2103 } else if (status >= BE_MIN_MSIX_VECTORS) {
2104 num_vec = status;
2105 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2106 num_vec) == 0)
2107 goto done;
2108 }
2109 return;
2110done:
2111 adapter->num_msix_vec = num_vec;
2112 return;
2113}
2114
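/* Enable SR-IOV on the PF when the num_vfs module parameter is set. The
 * requested count is clamped to the TotalVFs value read from the SR-IOV
 * extended capability, and a vf_cfg array is allocated on success.
 */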
2115static int be_sriov_enable(struct be_adapter *adapter)
2116{
2117 be_check_sriov_fn_type(adapter);
2118#ifdef CONFIG_PCI_IOV
2119 if (be_physfn(adapter) && num_vfs) {
2120 int status, pos;
2121 u16 nvfs;
2122
2123 pos = pci_find_ext_capability(adapter->pdev,
2124 PCI_EXT_CAP_ID_SRIOV);
2125 pci_read_config_word(adapter->pdev,
2126 pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2127
2128 if (num_vfs > nvfs) {
2129 dev_info(&adapter->pdev->dev,
2130 "Device supports %d VFs and not %d\n",
2131 nvfs, num_vfs);
2132 num_vfs = nvfs;
2133 }
2134
2135 status = pci_enable_sriov(adapter->pdev, num_vfs);
2136 adapter->sriov_enabled = !status;
2137
2138 if (adapter->sriov_enabled) {
2139 adapter->vf_cfg = kcalloc(num_vfs,
2140 sizeof(struct be_vf_cfg),
2141 GFP_KERNEL);
2142 if (!adapter->vf_cfg)
2143 return -ENOMEM;
2144 }
2145 }
2146#endif
2147 return 0;
2148}
2149
2150static void be_sriov_disable(struct be_adapter *adapter)
2151{
2152#ifdef CONFIG_PCI_IOV
2153 if (adapter->sriov_enabled) {
2154 pci_disable_sriov(adapter->pdev);
2155 kfree(adapter->vf_cfg);
2156 adapter->sriov_enabled = false;
2157 }
2158#endif
2159}
2160
2161static inline int be_msix_vec_get(struct be_adapter *adapter,
2162 struct be_eq_obj *eq_obj)
2163{
2164 return adapter->msix_entries[eq_obj->eq_idx].vector;
2165}
2166
2167static int be_request_irq(struct be_adapter *adapter,
2168 struct be_eq_obj *eq_obj,
2169 void *handler, char *desc, void *context)
2170{
2171 struct net_device *netdev = adapter->netdev;
2172 int vec;
2173
2174 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2175 vec = be_msix_vec_get(adapter, eq_obj);
2176 return request_irq(vec, handler, 0, eq_obj->desc, context);
2177}
2178
2179static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2180 void *context)
2181{
2182 int vec = be_msix_vec_get(adapter, eq_obj);
2183 free_irq(vec, context);
2184}
2185
2186static int be_msix_register(struct be_adapter *adapter)
2187{
2188 struct be_rx_obj *rxo;
2189 int status, i;
2190 char qname[10];
2191
2192 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2193 adapter);
2194 if (status)
2195 goto err;
2196
2197 for_all_rx_queues(adapter, rxo, i) {
2198 sprintf(qname, "rxq%d", i);
2199 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2200 qname, rxo);
2201 if (status)
2202 goto err_msix;
2203 }
2204
2205 return 0;
2206
2207err_msix:
2208 be_free_irq(adapter, &adapter->tx_eq, adapter);
2209
2210 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2211 be_free_irq(adapter, &rxo->rx_eq, rxo);
2212
2213err:
2214 dev_warn(&adapter->pdev->dev,
2215 "MSIX Request IRQ failed - err %d\n", status);
2216 be_msix_disable(adapter);
2217 return status;
2218}
2219
2220static int be_irq_register(struct be_adapter *adapter)
2221{
2222 struct net_device *netdev = adapter->netdev;
2223 int status;
2224
2225 if (msix_enabled(adapter)) {
2226 status = be_msix_register(adapter);
2227 if (status == 0)
2228 goto done;
2229 /* INTx is not supported for VF */
2230 if (!be_physfn(adapter))
2231 return status;
2232 }
2233
2234 /* INTx */
2235 netdev->irq = adapter->pdev->irq;
2236 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2237 adapter);
2238 if (status) {
2239 dev_err(&adapter->pdev->dev,
2240 "INTx request IRQ failed - err %d\n", status);
2241 return status;
2242 }
2243done:
2244 adapter->isr_registered = true;
2245 return 0;
2246}
2247
2248static void be_irq_unregister(struct be_adapter *adapter)
2249{
2250 struct net_device *netdev = adapter->netdev;
2251 struct be_rx_obj *rxo;
2252 int i;
2253
2254 if (!adapter->isr_registered)
2255 return;
2256
2257 /* INTx */
2258 if (!msix_enabled(adapter)) {
2259 free_irq(netdev->irq, adapter);
2260 goto done;
2261 }
2262
2263 /* MSIx */
2264 be_free_irq(adapter, &adapter->tx_eq, adapter);
2265
2266 for_all_rx_queues(adapter, rxo, i)
2267 be_free_irq(adapter, &rxo->rx_eq, rxo);
2268
2269done:
2270 adapter->isr_registered = false;
2271}
2272
2273static void be_rx_queues_clear(struct be_adapter *adapter)
2274{
2275 struct be_queue_info *q;
2276 struct be_rx_obj *rxo;
2277 int i;
2278
2279 for_all_rx_queues(adapter, rxo, i) {
2280 q = &rxo->q;
2281 if (q->created) {
2282 be_cmd_rxq_destroy(adapter, q);
2283 /* After the rxq is invalidated, wait for a grace time
2284 * of 1ms for all DMA to end and the flush compl to
2285 * arrive
2286 */
2287 mdelay(1);
2288 be_rx_q_clean(adapter, rxo);
2289 }
2290
2291 /* Clear any residual events */
2292 q = &rxo->rx_eq.q;
2293 if (q->created)
2294 be_eq_clean(adapter, &rxo->rx_eq);
2295 }
2296}
2297
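/* Teardown order matters here: async MCC processing is stopped and the
 * h/w interrupt disabled first, NAPI is turned off, Lancer CQs are
 * notified without re-arming, in-flight IRQs are synchronized and freed,
 * and only then are pending TX completions reaped and the RX queues
 * destroyed and drained.
 */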
2298static int be_close(struct net_device *netdev)
2299{
2300 struct be_adapter *adapter = netdev_priv(netdev);
2301 struct be_rx_obj *rxo;
2302 struct be_tx_obj *txo;
2303 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2304 int vec, i;
2305
2306 be_async_mcc_disable(adapter);
2307
2308 if (!lancer_chip(adapter))
2309 be_intr_set(adapter, false);
2310
2311 for_all_rx_queues(adapter, rxo, i)
2312 napi_disable(&rxo->rx_eq.napi);
2313
2314 napi_disable(&tx_eq->napi);
2315
2316 if (lancer_chip(adapter)) {
2317 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2318 for_all_rx_queues(adapter, rxo, i)
2319 be_cq_notify(adapter, rxo->cq.id, false, 0);
2320 for_all_tx_queues(adapter, txo, i)
2321 be_cq_notify(adapter, txo->cq.id, false, 0);
2322 }
2323
2324 if (msix_enabled(adapter)) {
2325 vec = be_msix_vec_get(adapter, tx_eq);
2326 synchronize_irq(vec);
2327
2328 for_all_rx_queues(adapter, rxo, i) {
2329 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2330 synchronize_irq(vec);
2331 }
2332 } else {
2333 synchronize_irq(netdev->irq);
2334 }
2335 be_irq_unregister(adapter);
2336
2337 /* Wait for all pending tx completions to arrive so that
2338 * all tx skbs are freed.
2339 */
2340 for_all_tx_queues(adapter, txo, i)
2341 be_tx_compl_clean(adapter, txo);
2342
2343 be_rx_queues_clear(adapter);
2344 return 0;
2345}
2346
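/* Create the RX queues in FW. Queue 0 is the default non-RSS queue; the
 * remaining queues are created with RSS enabled and their rss_ids are
 * gathered into rsstable and programmed via be_cmd_rss_config(). RX
 * buffers are posted and NAPI enabled here as well.
 */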
2347static int be_rx_queues_setup(struct be_adapter *adapter)
2348{
2349 struct be_rx_obj *rxo;
2350 int rc, i;
2351 u8 rsstable[MAX_RSS_QS];
2352
2353 for_all_rx_queues(adapter, rxo, i) {
2354 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2355 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2356 adapter->if_handle,
2357 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2358 if (rc)
2359 return rc;
2360 }
2361
2362 if (be_multi_rxq(adapter)) {
2363 for_all_rss_queues(adapter, rxo, i)
2364 rsstable[i] = rxo->rss_id;
2365
2366 rc = be_cmd_rss_config(adapter, rsstable,
2367 adapter->num_rx_qs - 1);
2368 if (rc)
2369 return rc;
2370 }
2371
2372 /* First time posting */
2373 for_all_rx_queues(adapter, rxo, i) {
2374 be_post_rx_frags(rxo, GFP_KERNEL);
2375 napi_enable(&rxo->rx_eq.napi);
2376 }
2377 return 0;
2378}
2379
2380static int be_open(struct net_device *netdev)
2381{
2382 struct be_adapter *adapter = netdev_priv(netdev);
2383 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2384 struct be_rx_obj *rxo;
2385 int status, i;
2386
2387 status = be_rx_queues_setup(adapter);
2388 if (status)
2389 goto err;
2390
2391 napi_enable(&tx_eq->napi);
2392
2393 be_irq_register(adapter);
2394
2395 if (!lancer_chip(adapter))
2396 be_intr_set(adapter, true);
2397
2398 /* The evt queues are created in unarmed state; arm them */
2399 for_all_rx_queues(adapter, rxo, i) {
2400 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2401 be_cq_notify(adapter, rxo->cq.id, true, 0);
2402 }
2403 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2404
2405 /* Now that interrupts are on we can process async mcc */
2406 be_async_mcc_enable(adapter);
2407
2408 return 0;
2409err:
2410 be_close(adapter->netdev);
2411 return -EIO;
2412}
2413
2414static int be_setup_wol(struct be_adapter *adapter, bool enable)
2415{
2416 struct be_dma_mem cmd;
2417 int status = 0;
2418 u8 mac[ETH_ALEN];
2419
2420 memset(mac, 0, ETH_ALEN);
2421
2422 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2423 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2424 GFP_KERNEL);
2425 if (cmd.va == NULL)
2426 return -1;
2427 memset(cmd.va, 0, cmd.size);
2428
2429 if (enable) {
2430 status = pci_write_config_dword(adapter->pdev,
2431 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2432 if (status) {
2433 dev_err(&adapter->pdev->dev,
2434 "Could not enable Wake-on-LAN\n");
2435 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2436 cmd.dma);
2437 return status;
2438 }
2439 status = be_cmd_enable_magic_wol(adapter,
2440 adapter->netdev->dev_addr, &cmd);
2441 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2442 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2443 } else {
2444 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2445 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2446 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2447 }
2448
2449 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2450 return status;
2451}
2452
2453/*
2454 * Generate a seed MAC address from the PF MAC address using jhash.
2455 * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2456 * These addresses are programmed in the ASIC by the PF and the VF driver
2457 * queries for the MAC address during its probe.
2458 */
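/* For example (hypothetical seed): if the generated seed were
 * 02:11:22:33:44:50, VF0 would get ...:50, VF1 ...:51 and so on, since
 * mac[5] is incremented once per VF below; this assumes fewer than 256
 * VFs, as only the last octet is varied.
 */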
2459static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2460{
2461 u32 vf;
2462 int status = 0;
2463 u8 mac[ETH_ALEN];
2464
2465 be_vf_eth_addr_generate(adapter, mac);
2466
2467 for (vf = 0; vf < num_vfs; vf++) {
2468 status = be_cmd_pmac_add(adapter, mac,
2469 adapter->vf_cfg[vf].vf_if_handle,
2470 &adapter->vf_cfg[vf].vf_pmac_id,
2471 vf + 1);
2472 if (status)
2473 dev_err(&adapter->pdev->dev,
2474 "MAC address add failed for VF %d\n", vf);
2475 else
2476 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2477
2478 mac[5] += 1;
2479 }
2480 return status;
2481}
2482
2483static void be_vf_clear(struct be_adapter *adapter)
2484{
2485 u32 vf;
2486
2487 for (vf = 0; vf < num_vfs; vf++)
2488 be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle,
2489 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2490
2491 for (vf = 0; vf < num_vfs; vf++)
2492 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2493 vf + 1);
2494}
2495
2496static int be_clear(struct be_adapter *adapter)
2497{
2498 if (be_physfn(adapter) && adapter->sriov_enabled)
2499 be_vf_clear(adapter);
2500
2501 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2502
2503 be_mcc_queues_destroy(adapter);
2504 be_rx_queues_destroy(adapter);
2505 be_tx_queues_destroy(adapter);
2506
2507 /* tell fw we're done with firing cmds */
2508 be_cmd_fw_clean(adapter);
2509 return 0;
2510}
2511
2512static void be_vf_setup_init(struct be_adapter *adapter)
2513{
2514 int vf;
2515
2516 for (vf = 0; vf < num_vfs; vf++) {
2517 adapter->vf_cfg[vf].vf_if_handle = -1;
2518 adapter->vf_cfg[vf].vf_pmac_id = -1;
2519 }
2520}
2521
2522static int be_vf_setup(struct be_adapter *adapter)
2523{
2524 u32 cap_flags, en_flags, vf;
2525 u16 lnk_speed;
2526 int status;
2527
2528 be_vf_setup_init(adapter);
2529
2530 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2531 for (vf = 0; vf < num_vfs; vf++) {
2532 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2533 &adapter->vf_cfg[vf].vf_if_handle,
2534 NULL, vf+1);
2535 if (status)
2536 goto err;
2537 }
2538
2539 if (!lancer_chip(adapter)) {
2540 status = be_vf_eth_addr_config(adapter);
2541 if (status)
2542 goto err;
2543 }
2544
2545 for (vf = 0; vf < num_vfs; vf++) {
2546 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2547 vf + 1);
2548 if (status)
2549 goto err;
2550 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2551 }
2552 return 0;
2553err:
2554 return status;
2555}
2556
2557static void be_setup_init(struct be_adapter *adapter)
2558{
2559 adapter->vlan_prio_bmap = 0xff;
2560 adapter->link_speed = -1;
2561 adapter->if_handle = -1;
2562 adapter->be3_native = false;
2563 adapter->promiscuous = false;
2564 adapter->eq_next_idx = 0;
2565}
2566
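/* Main setup path: create the TX/RX/MCC queues, query the permanent MAC,
 * create the FW interface with the capability/enable flags computed
 * below, create the TX queues in FW, apply VLAN, RX-mode and
 * flow-control settings, raise the PCIe read request size, and finally
 * set up the VFs if SR-IOV is enabled. Failures unwind via be_clear().
 */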
2567static int be_setup(struct be_adapter *adapter)
2568{
2569 struct net_device *netdev = adapter->netdev;
2570 u32 cap_flags, en_flags;
2571 u32 tx_fc, rx_fc;
2572 int status, i;
2573 u8 mac[ETH_ALEN];
2574 struct be_tx_obj *txo;
2575
2576 be_setup_init(adapter);
2577
2578 be_cmd_req_native_mode(adapter);
2579
2580 status = be_tx_queues_create(adapter);
2581 if (status != 0)
2582 goto err;
2583
2584 status = be_rx_queues_create(adapter);
2585 if (status != 0)
2586 goto err;
2587
2588 status = be_mcc_queues_create(adapter);
2589 if (status != 0)
2590 goto err;
2591
2592 memset(mac, 0, ETH_ALEN);
2593 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2594 true /* permanent */, 0);
2595 if (status)
2596 return status;
2597 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2598 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2599
2600 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2602 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2603 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2604
2605 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2606 cap_flags |= BE_IF_FLAGS_RSS;
2607 en_flags |= BE_IF_FLAGS_RSS;
2608 }
2609 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2610 netdev->dev_addr, &adapter->if_handle,
2611 &adapter->pmac_id, 0);
2612 if (status != 0)
2613 goto err;
2614
2615 for_all_tx_queues(adapter, txo, i) {
2616 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2617 if (status)
2618 goto err;
2619 }
2620
2621 /* For BEx, the VF's permanent MAC queried from the card is incorrect.
2622 * Query the MAC configured by the PF using the if_handle.
2623 */
2624 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2625 status = be_cmd_mac_addr_query(adapter, mac,
2626 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2627 if (!status) {
2628 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2629 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2630 }
2631 }
2632
2633 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2634
2635 status = be_vid_config(adapter, false, 0);
2636 if (status)
2637 goto err;
2638
2639 be_set_rx_mode(adapter->netdev);
2640
2641 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2642 if (status)
2643 goto err;
2644 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2645 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2646 adapter->rx_fc);
2647 if (status)
2648 goto err;
2649 }
2650
2651 pcie_set_readrq(adapter->pdev, 4096);
2652
2653 if (be_physfn(adapter) && adapter->sriov_enabled) {
2654 status = be_vf_setup(adapter);
2655 if (status)
2656 goto err;
2657 }
2658
2659 return 0;
2660err:
2661 be_clear(adapter);
2662 return status;
2663}
2664
2665#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
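/* Decide whether the redboot (boot code) region needs flashing: read the
 * CRC stored in flash and compare it against the last 4 bytes of the new
 * image; flash only when the two differ.
 */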
2666static bool be_flash_redboot(struct be_adapter *adapter,
2667 const u8 *p, u32 img_start, int image_size,
2668 int hdr_size)
2669{
2670 u32 crc_offset;
2671 u8 flashed_crc[4];
2672 int status;
2673
2674 crc_offset = hdr_size + img_start + image_size - 4;
2675
2676 p += crc_offset;
2677
2678 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2679 (image_size - 4));
2680 if (status) {
2681 dev_err(&adapter->pdev->dev,
2682 "could not get crc from flash, not flashing redboot\n");
2683 return false;
2684 }
2685
2686 /* update redboot only if the crc does not match */
2687 return memcmp(flashed_crc, p, 4) != 0;
2691}
2692
2693static bool phy_flashing_required(struct be_adapter *adapter)
2694{
2695 int status = 0;
2696 struct be_phy_info phy_info;
2697
2698 status = be_cmd_get_phy_info(adapter, &phy_info);
2699 if (status)
2700 return false;
2701 if ((phy_info.phy_type == TN_8022) &&
2702 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2703 return true;
2704 }
2705 return false;
2706}
2707
2708static int be_flash_data(struct be_adapter *adapter,
2709 const struct firmware *fw,
2710 struct be_dma_mem *flash_cmd, int num_of_images)
2712{
2713 int status = 0, i, filehdr_size = 0;
2714 u32 total_bytes = 0, flash_op;
2715 int num_bytes;
2716 const u8 *p = fw->data;
2717 struct be_cmd_write_flashrom *req = flash_cmd->va;
2718 const struct flash_comp *pflashcomp;
2719 int num_comp;
2720
2721 static const struct flash_comp gen3_flash_types[10] = {
2722 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2723 FLASH_IMAGE_MAX_SIZE_g3},
2724 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2725 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2726 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2727 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2728 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2729 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2730 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2731 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2732 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2733 FLASH_IMAGE_MAX_SIZE_g3},
2734 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2735 FLASH_IMAGE_MAX_SIZE_g3},
2736 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2737 FLASH_IMAGE_MAX_SIZE_g3},
2738 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2739 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2740 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2741 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2742 };
2743 static const struct flash_comp gen2_flash_types[8] = {
2744 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2745 FLASH_IMAGE_MAX_SIZE_g2},
2746 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2747 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2748 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2749 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2750 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2751 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2752 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2753 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2754 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2755 FLASH_IMAGE_MAX_SIZE_g2},
2756 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2757 FLASH_IMAGE_MAX_SIZE_g2},
2758 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2759 FLASH_IMAGE_MAX_SIZE_g2}
2760 };
2761
2762 if (adapter->generation == BE_GEN3) {
2763 pflashcomp = gen3_flash_types;
2764 filehdr_size = sizeof(struct flash_file_hdr_g3);
2765 num_comp = ARRAY_SIZE(gen3_flash_types);
2766 } else {
2767 pflashcomp = gen2_flash_types;
2768 filehdr_size = sizeof(struct flash_file_hdr_g2);
2769 num_comp = ARRAY_SIZE(gen2_flash_types);
2770 }
2771 for (i = 0; i < num_comp; i++) {
2772 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2773 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2774 continue;
2775 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2776 if (!phy_flashing_required(adapter))
2777 continue;
2778 }
2779 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2780 (!be_flash_redboot(adapter, fw->data,
2781 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2782 (num_of_images * sizeof(struct image_hdr)))))
2783 continue;
2784 p = fw->data;
2785 p += filehdr_size + pflashcomp[i].offset
2786 + (num_of_images * sizeof(struct image_hdr));
2787 if (p + pflashcomp[i].size > fw->data + fw->size)
2788 return -1;
2789 total_bytes = pflashcomp[i].size;
2790 while (total_bytes) {
2791 if (total_bytes > 32*1024)
2792 num_bytes = 32*1024;
2793 else
2794 num_bytes = total_bytes;
2795 total_bytes -= num_bytes;
2796 if (!total_bytes) {
2797 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2798 flash_op = FLASHROM_OPER_PHY_FLASH;
2799 else
2800 flash_op = FLASHROM_OPER_FLASH;
2801 } else {
2802 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2803 flash_op = FLASHROM_OPER_PHY_SAVE;
2804 else
2805 flash_op = FLASHROM_OPER_SAVE;
2806 }
2807 memcpy(req->params.data_buf, p, num_bytes);
2808 p += num_bytes;
2809 status = be_cmd_write_flashrom(adapter, flash_cmd,
2810 pflashcomp[i].optype, flash_op, num_bytes);
2811 if (status) {
2812 if ((status == ILLEGAL_IOCTL_REQ) &&
2813 (pflashcomp[i].optype ==
2814 IMG_TYPE_PHY_FW))
2815 break;
2816 dev_err(&adapter->pdev->dev,
2817 "cmd to write to flash rom failed.\n");
2818 return -1;
2819 }
2820 }
2821 }
2822 return 0;
2823}
2824
2825static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2826{
2827 if (fhdr == NULL)
2828 return 0;
2829 if (fhdr->build[0] == '3')
2830 return BE_GEN3;
2831 else if (fhdr->build[0] == '2')
2832 return BE_GEN2;
2833 else
2834 return 0;
2835}
2836
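/* Lancer firmware download: the image is streamed to the "/prg" object
 * in 32KB chunks via lancer_cmd_write_object(), and a final zero-length
 * write at the end offset commits the downloaded firmware.
 */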
2837static int lancer_fw_download(struct be_adapter *adapter,
2838 const struct firmware *fw)
2839{
2840#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2841#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2842 struct be_dma_mem flash_cmd;
2843 const u8 *data_ptr = NULL;
2844 u8 *dest_image_ptr = NULL;
2845 size_t image_size = 0;
2846 u32 chunk_size = 0;
2847 u32 data_written = 0;
2848 u32 offset = 0;
2849 int status = 0;
2850 u8 add_status = 0;
2851
2852 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2853 dev_err(&adapter->pdev->dev,
2854 "FW Image not properly aligned. "
2855 "Length must be 4-byte aligned.\n");
2856 status = -EINVAL;
2857 goto lancer_fw_exit;
2858 }
2859
2860 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2861 + LANCER_FW_DOWNLOAD_CHUNK;
2862 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2863 &flash_cmd.dma, GFP_KERNEL);
2864 if (!flash_cmd.va) {
2865 status = -ENOMEM;
2866 dev_err(&adapter->pdev->dev,
2867 "Memory allocation failure while flashing\n");
2868 goto lancer_fw_exit;
2869 }
2870
2871 dest_image_ptr = flash_cmd.va +
2872 sizeof(struct lancer_cmd_req_write_object);
2873 image_size = fw->size;
2874 data_ptr = fw->data;
2875
2876 while (image_size) {
2877 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2878
2879 /* Copy the image chunk content. */
2880 memcpy(dest_image_ptr, data_ptr, chunk_size);
2881
2882 status = lancer_cmd_write_object(adapter, &flash_cmd,
2883 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2884 &data_written, &add_status);
2885
2886 if (status)
2887 break;
2888
2889 offset += data_written;
2890 data_ptr += data_written;
2891 image_size -= data_written;
2892 }
2893
2894 if (!status) {
2895 /* Commit the firmware written so far */
2896 status = lancer_cmd_write_object(adapter, &flash_cmd,
2897 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2898 &data_written, &add_status);
2899 }
2900
2901 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2902 flash_cmd.dma);
2903 if (status) {
2904 dev_err(&adapter->pdev->dev,
2905 "Firmware load error. "
2906 "Status code: 0x%x Additional Status: 0x%x\n",
2907 status, add_status);
2908 goto lancer_fw_exit;
2909 }
2910
2911 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2912lancer_fw_exit:
2913 return status;
2914}
2915
2916static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2917{
2918 struct flash_file_hdr_g2 *fhdr;
2919 struct flash_file_hdr_g3 *fhdr3;
2920 struct image_hdr *img_hdr_ptr = NULL;
2921 struct be_dma_mem flash_cmd;
2922 const u8 *p;
2923 int status = 0, i = 0, num_imgs = 0;
2924
2925 p = fw->data;
2926 fhdr = (struct flash_file_hdr_g2 *) p;
2927
2928 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2929 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2930 &flash_cmd.dma, GFP_KERNEL);
2931 if (!flash_cmd.va) {
2932 status = -ENOMEM;
2933 dev_err(&adapter->pdev->dev,
2934 "Memory allocation failure while flashing\n");
2935 goto be_fw_exit;
2936 }
2937
2938 if ((adapter->generation == BE_GEN3) &&
2939 (get_ufigen_type(fhdr) == BE_GEN3)) {
2940 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2941 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2942 for (i = 0; i < num_imgs; i++) {
2943 img_hdr_ptr = (struct image_hdr *) (fw->data +
2944 (sizeof(struct flash_file_hdr_g3) +
2945 i * sizeof(struct image_hdr)));
2946 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2947 status = be_flash_data(adapter, fw, &flash_cmd,
2948 num_imgs);
2949 }
2950 } else if ((adapter->generation == BE_GEN2) &&
2951 (get_ufigen_type(fhdr) == BE_GEN2)) {
2952 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2953 } else {
2954 dev_err(&adapter->pdev->dev,
2955 "UFI and Interface are not compatible for flashing\n");
2956 status = -1;
2957 }
2958
2959 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2960 flash_cmd.dma);
2961 if (status) {
2962 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2963 goto be_fw_exit;
2964 }
2965
2966 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2967
2968be_fw_exit:
2969 return status;
2970}
2971
2972int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2973{
2974 const struct firmware *fw;
2975 int status;
2976
2977 if (!netif_running(adapter->netdev)) {
2978 dev_err(&adapter->pdev->dev,
2979 "Firmware load not allowed (interface is down)\n");
2980 return -1;
2981 }
2982
2983 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2984 if (status)
2985 goto fw_exit;
2986
2987 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2988
2989 if (lancer_chip(adapter))
2990 status = lancer_fw_download(adapter, fw);
2991 else
2992 status = be_fw_download(adapter, fw);
2993
2994fw_exit:
2995 release_firmware(fw);
2996 return status;
2997}
2998
2999static const struct net_device_ops be_netdev_ops = {
3000 .ndo_open = be_open,
3001 .ndo_stop = be_close,
3002 .ndo_start_xmit = be_xmit,
3003 .ndo_set_rx_mode = be_set_rx_mode,
3004 .ndo_set_mac_address = be_mac_addr_set,
3005 .ndo_change_mtu = be_change_mtu,
3006 .ndo_get_stats64 = be_get_stats64,
3007 .ndo_validate_addr = eth_validate_addr,
3008 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
3009 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
3010 .ndo_set_vf_mac = be_set_vf_mac,
3011 .ndo_set_vf_vlan = be_set_vf_vlan,
3012 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
3013 .ndo_get_vf_config = be_get_vf_config
3014};
3015
3016static void be_netdev_init(struct net_device *netdev)
3017{
3018 struct be_adapter *adapter = netdev_priv(netdev);
3019 struct be_rx_obj *rxo;
3020 int i;
3021
3022 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3023 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3024 NETIF_F_HW_VLAN_TX;
3025 if (be_multi_rxq(adapter))
3026 netdev->hw_features |= NETIF_F_RXHASH;
3027
3028 netdev->features |= netdev->hw_features |
3029 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3030
3031 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3032 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3033
3034 netdev->flags |= IFF_MULTICAST;
3035
3036 netif_set_gso_max_size(netdev, 65535);
3037
3038 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3039
3040 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3041
3042 for_all_rx_queues(adapter, rxo, i)
3043 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3044 BE_NAPI_WEIGHT);
3045
3046 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3047 BE_NAPI_WEIGHT);
3048}
3049
3050static void be_unmap_pci_bars(struct be_adapter *adapter)
3051{
3052 if (adapter->csr)
3053 iounmap(adapter->csr);
3054 if (adapter->db)
3055 iounmap(adapter->db);
3056}
3057
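/* Map the PCI BARs. Lancer exposes everything through BAR 0, which is
 * mapped as the doorbell area. On BE2/BE3 the PF additionally maps the
 * CSR space from BAR 2; the doorbell BAR is 4, except for GEN3 VFs,
 * which use BAR 0.
 */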
3058static int be_map_pci_bars(struct be_adapter *adapter)
3059{
3060 u8 __iomem *addr;
3061 int db_reg;
3062
3063 if (lancer_chip(adapter)) {
3064 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3065 pci_resource_len(adapter->pdev, 0));
3066 if (addr == NULL)
3067 return -ENOMEM;
3068 adapter->db = addr;
3069 return 0;
3070 }
3071
3072 if (be_physfn(adapter)) {
3073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3074 pci_resource_len(adapter->pdev, 2));
3075 if (addr == NULL)
3076 return -ENOMEM;
3077 adapter->csr = addr;
3078 }
3079
3080 if (adapter->generation == BE_GEN2) {
3081 db_reg = 4;
3082 } else {
3083 if (be_physfn(adapter))
3084 db_reg = 4;
3085 else
3086 db_reg = 0;
3087 }
3088 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3089 pci_resource_len(adapter->pdev, db_reg));
3090 if (addr == NULL)
3091 goto pci_map_err;
3092 adapter->db = addr;
3093
3094 return 0;
3095pci_map_err:
3096 be_unmap_pci_bars(adapter);
3097 return -ENOMEM;
3098}
3099
3101static void be_ctrl_cleanup(struct be_adapter *adapter)
3102{
3103 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3104
3105 be_unmap_pci_bars(adapter);
3106
3107 if (mem->va)
3108 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3109 mem->dma);
3110
3111 mem = &adapter->rx_filter;
3112 if (mem->va)
3113 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3114 mem->dma);
3115}
3116
3117static int be_ctrl_init(struct be_adapter *adapter)
3118{
3119 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3120 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3121 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3122 int status;
3123
3124 status = be_map_pci_bars(adapter);
3125 if (status)
3126 goto done;
3127
3128 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3129 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3130 mbox_mem_alloc->size,
3131 &mbox_mem_alloc->dma,
3132 GFP_KERNEL);
3133 if (!mbox_mem_alloc->va) {
3134 status = -ENOMEM;
3135 goto unmap_pci_bars;
3136 }
3137 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3138 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3139 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3140 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3141
3142 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3143 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3144 &rx_filter->dma, GFP_KERNEL);
3145 if (rx_filter->va == NULL) {
3146 status = -ENOMEM;
3147 goto free_mbox;
3148 }
3149 memset(rx_filter->va, 0, rx_filter->size);
3150
3151 mutex_init(&adapter->mbox_lock);
3152 spin_lock_init(&adapter->mcc_lock);
3153 spin_lock_init(&adapter->mcc_cq_lock);
3154
3155 init_completion(&adapter->flash_compl);
3156 pci_save_state(adapter->pdev);
3157 return 0;
3158
3159free_mbox:
3160 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3161 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3162
3163unmap_pci_bars:
3164 be_unmap_pci_bars(adapter);
3165
3166done:
3167 return status;
3168}
3169
3170static void be_stats_cleanup(struct be_adapter *adapter)
3171{
3172 struct be_dma_mem *cmd = &adapter->stats_cmd;
3173
3174 if (cmd->va)
3175 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3176 cmd->va, cmd->dma);
3177}
3178
3179static int be_stats_init(struct be_adapter *adapter)
3180{
3181 struct be_dma_mem *cmd = &adapter->stats_cmd;
3182
3183 if (adapter->generation == BE_GEN2) {
3184 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3185 } else {
3186 if (lancer_chip(adapter))
3187 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3188 else
3189 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3190 }
3191 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3192 GFP_KERNEL);
3193 if (cmd->va == NULL)
3194 return -1;
3195 memset(cmd->va, 0, cmd->size);
3196 return 0;
3197}
3198
3199static void __devexit be_remove(struct pci_dev *pdev)
3200{
3201 struct be_adapter *adapter = pci_get_drvdata(pdev);
3202
3203 if (!adapter)
3204 return;
3205
3206 cancel_delayed_work_sync(&adapter->work);
3207
3208 unregister_netdev(adapter->netdev);
3209
3210 be_clear(adapter);
3211
3212 be_stats_cleanup(adapter);
3213
3214 be_ctrl_cleanup(adapter);
3215
3216 be_sriov_disable(adapter);
3217
3218 be_msix_disable(adapter);
3219
3220 pci_set_drvdata(pdev, NULL);
3221 pci_release_regions(pdev);
3222 pci_disable_device(pdev);
3223
3224 free_netdev(adapter->netdev);
3225}
3226
3227static int be_get_config(struct be_adapter *adapter)
3228{
3229 int status;
3230
3231 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3232 &adapter->function_mode, &adapter->function_caps);
3233 if (status)
3234 return status;
3235
3236 if (adapter->function_mode & FLEX10_MODE)
3237 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3238 else
3239 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3240
3241 status = be_cmd_get_cntl_attributes(adapter);
3242 if (status)
3243 return status;
3244
3245 return 0;
3246}
3247
3248static int be_dev_family_check(struct be_adapter *adapter)
3249{
3250 struct pci_dev *pdev = adapter->pdev;
3251 u32 sli_intf = 0, if_type;
3252
3253 switch (pdev->device) {
3254 case BE_DEVICE_ID1:
3255 case OC_DEVICE_ID1:
3256 adapter->generation = BE_GEN2;
3257 break;
3258 case BE_DEVICE_ID2:
3259 case OC_DEVICE_ID2:
3260 adapter->generation = BE_GEN3;
3261 break;
3262 case OC_DEVICE_ID3:
3263 case OC_DEVICE_ID4:
3264 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3265 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3266 SLI_INTF_IF_TYPE_SHIFT;
3267
3268 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3269 if_type != 0x02) {
3270 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3271 return -EINVAL;
3272 }
3273 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3274 SLI_INTF_FAMILY_SHIFT);
3275 adapter->generation = BE_GEN3;
3276 break;
3277 default:
3278 adapter->generation = 0;
3279 }
3280 return 0;
3281}
3282
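/* Poll SLIPORT_STATUS for the ready bit, sleeping 20ms between reads;
 * with SLIPORT_READY_TIMEOUT (500) iterations this waits up to roughly
 * 10 seconds before giving up.
 */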
3283static int lancer_wait_ready(struct be_adapter *adapter)
3284{
3285#define SLIPORT_READY_TIMEOUT 500
3286 u32 sliport_status;
3287 int status = 0, i;
3288
3289 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3290 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3291 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3292 break;
3293
3294 msleep(20);
3295 }
3296
3297 if (i == SLIPORT_READY_TIMEOUT)
3298 status = -1;
3299
3300 return status;
3301}
3302
3303static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3304{
3305 int status;
3306 u32 sliport_status, err, reset_needed;

3307 status = lancer_wait_ready(adapter);
3308 if (!status) {
3309 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3310 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3311 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3312 if (err && reset_needed) {
3313 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3314 adapter->db + SLIPORT_CONTROL_OFFSET);
3315
3316 /* check if the adapter has corrected the error */
3317 status = lancer_wait_ready(adapter);
3318 sliport_status = ioread32(adapter->db +
3319 SLIPORT_STATUS_OFFSET);
3320 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3321 SLIPORT_STATUS_RN_MASK);
3322 if (status || sliport_status)
3323 status = -1;
3324 } else if (err || reset_needed) {
3325 status = -1;
3326 }
3327 }
3328 return status;
3329}
3330
3331static int __devinit be_probe(struct pci_dev *pdev,
3332 const struct pci_device_id *pdev_id)
3333{
3334 int status = 0;
3335 struct be_adapter *adapter;
3336 struct net_device *netdev;
3337
3338 status = pci_enable_device(pdev);
3339 if (status)
3340 goto do_none;
3341
3342 status = pci_request_regions(pdev, DRV_NAME);
3343 if (status)
3344 goto disable_dev;
3345 pci_set_master(pdev);
3346
3347 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3348 if (netdev == NULL) {
3349 status = -ENOMEM;
3350 goto rel_reg;
3351 }
3352 adapter = netdev_priv(netdev);
3353 adapter->pdev = pdev;
3354 pci_set_drvdata(pdev, adapter);
3355
3356 status = be_dev_family_check(adapter);
3357 if (status)
3358 goto free_netdev;
3359
3360 adapter->netdev = netdev;
3361 SET_NETDEV_DEV(netdev, &pdev->dev);
3362
3363 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3364 if (!status) {
3365 netdev->features |= NETIF_F_HIGHDMA;
3366 } else {
3367 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3368 if (status) {
3369 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3370 goto free_netdev;
3371 }
3372 }
3373
3374 status = be_sriov_enable(adapter);
3375 if (status)
3376 goto free_netdev;
3377
3378 status = be_ctrl_init(adapter);
3379 if (status)
3380 goto disable_sriov;
3381
3382 if (lancer_chip(adapter)) {
3383 status = lancer_test_and_set_rdy_state(adapter);
3384 if (status) {
3385 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3386 goto ctrl_clean;
3387 }
3388 }
3389
3390 /* sync up with fw's ready state */
3391 if (be_physfn(adapter)) {
3392 status = be_cmd_POST(adapter);
3393 if (status)
3394 goto ctrl_clean;
3395 }
3396
3397 /* tell fw we're ready to fire cmds */
3398 status = be_cmd_fw_init(adapter);
3399 if (status)
3400 goto ctrl_clean;
3401
3402 status = be_cmd_reset_function(adapter);
3403 if (status)
3404 goto ctrl_clean;
3405
3406 status = be_stats_init(adapter);
3407 if (status)
3408 goto ctrl_clean;
3409
3410 status = be_get_config(adapter);
3411 if (status)
3412 goto stats_clean;
3413
3414 /* The INTR bit may be set in the card when probed by a kdump kernel
3415 * after a crash.
3416 */
3417 if (!lancer_chip(adapter))
3418 be_intr_set(adapter, false);
3419
3420 be_msix_enable(adapter);
3421
3422 INIT_DELAYED_WORK(&adapter->work, be_worker);
3423 adapter->rx_fc = adapter->tx_fc = true;
3424
3425 status = be_setup(adapter);
3426 if (status)
3427 goto msix_disable;
3428
3429 be_netdev_init(netdev);
3430 status = register_netdev(netdev);
3431 if (status != 0)
3432 goto unsetup;
3433
3434 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3435
3436 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3437 return 0;
3438
3439unsetup:
3440 be_clear(adapter);
3441msix_disable:
3442 be_msix_disable(adapter);
3443stats_clean:
3444 be_stats_cleanup(adapter);
3445ctrl_clean:
3446 be_ctrl_cleanup(adapter);
3447disable_sriov:
3448 be_sriov_disable(adapter);
3449free_netdev:
3450 free_netdev(netdev);
3451 pci_set_drvdata(pdev, NULL);
3452rel_reg:
3453 pci_release_regions(pdev);
3454disable_dev:
3455 pci_disable_device(pdev);
3456do_none:
3457 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3458 return status;
3459}
3460
3461static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3462{
3463 struct be_adapter *adapter = pci_get_drvdata(pdev);
3464 struct net_device *netdev = adapter->netdev;
3465
3466 cancel_delayed_work_sync(&adapter->work);
3467 if (adapter->wol)
3468 be_setup_wol(adapter, true);
3469
3470 netif_device_detach(netdev);
3471 if (netif_running(netdev)) {
3472 rtnl_lock();
3473 be_close(netdev);
3474 rtnl_unlock();
3475 }
3476 be_clear(adapter);
3477
3478 be_msix_disable(adapter);
3479 pci_save_state(pdev);
3480 pci_disable_device(pdev);
3481 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3482 return 0;
3483}
3484
3485static int be_resume(struct pci_dev *pdev)
3486{
3487 int status = 0;
3488 struct be_adapter *adapter = pci_get_drvdata(pdev);
3489 struct net_device *netdev = adapter->netdev;
3490
3491 netif_device_detach(netdev);
3492
3493 status = pci_enable_device(pdev);
3494 if (status)
3495 return status;
3496
3497 pci_set_power_state(pdev, PCI_D0);
3498 pci_restore_state(pdev);
3499
3500 be_msix_enable(adapter);
3501 /* tell fw we're ready to fire cmds */
3502 status = be_cmd_fw_init(adapter);
3503 if (status)
3504 return status;
3505
3506 be_setup(adapter);
3507 if (netif_running(netdev)) {
3508 rtnl_lock();
3509 be_open(netdev);
3510 rtnl_unlock();
3511 }
3512 netif_device_attach(netdev);
3513
3514 if (adapter->wol)
3515 be_setup_wol(adapter, false);
3516
3517 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3518 return 0;
3519}
3520
3521/*
3522 * An FLR will stop BE from DMAing any data.
3523 */
3524static void be_shutdown(struct pci_dev *pdev)
3525{
3526 struct be_adapter *adapter = pci_get_drvdata(pdev);
3527
3528 if (!adapter)
3529 return;
3530
3531 cancel_delayed_work_sync(&adapter->work);
3532
3533 netif_device_detach(adapter->netdev);
3534
3535 if (adapter->wol)
3536 be_setup_wol(adapter, true);
3537
3538 be_cmd_reset_function(adapter);
3539
3540 pci_disable_device(pdev);
3541}
3542
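/* PCI error recovery (EEH) hooks: on error we detach the netdev and tear
 * the adapter down; slot_reset re-enables the device and waits for POST;
 * resume re-initializes the FW cmd path, re-runs be_setup() and
 * reattaches the netdev.
 */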
3543static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3544 pci_channel_state_t state)
3545{
3546 struct be_adapter *adapter = pci_get_drvdata(pdev);
3547 struct net_device *netdev = adapter->netdev;
3548
3549 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3550
3551 adapter->eeh_err = true;
3552
3553 netif_device_detach(netdev);
3554
3555 if (netif_running(netdev)) {
3556 rtnl_lock();
3557 be_close(netdev);
3558 rtnl_unlock();
3559 }
3560 be_clear(adapter);
3561
3562 if (state == pci_channel_io_perm_failure)
3563 return PCI_ERS_RESULT_DISCONNECT;
3564
3565 pci_disable_device(pdev);
3566
3567 return PCI_ERS_RESULT_NEED_RESET;
3568}
3569
3570static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3571{
3572 struct be_adapter *adapter = pci_get_drvdata(pdev);
3573 int status;
3574
3575 dev_info(&adapter->pdev->dev, "EEH reset\n");
3576 adapter->eeh_err = false;
3577 adapter->ue_detected = false;
3578 adapter->fw_timeout = false;
3579
3580 status = pci_enable_device(pdev);
3581 if (status)
3582 return PCI_ERS_RESULT_DISCONNECT;
3583
3584 pci_set_master(pdev);
3585 pci_set_power_state(pdev, PCI_D0);
3586 pci_restore_state(pdev);
3587
3588 /* Check if card is ok and fw is ready */
3589 status = be_cmd_POST(adapter);
3590 if (status)
3591 return PCI_ERS_RESULT_DISCONNECT;
3592
3593 return PCI_ERS_RESULT_RECOVERED;
3594}
3595
3596static void be_eeh_resume(struct pci_dev *pdev)
3597{
3598 int status = 0;
3599 struct be_adapter *adapter = pci_get_drvdata(pdev);
3600 struct net_device *netdev = adapter->netdev;
3601
3602 dev_info(&adapter->pdev->dev, "EEH resume\n");
3603
3604 pci_save_state(pdev);
3605
3606 /* tell fw we're ready to fire cmds */
3607 status = be_cmd_fw_init(adapter);
3608 if (status)
3609 goto err;
3610
3611 status = be_setup(adapter);
3612 if (status)
3613 goto err;
3614
3615 if (netif_running(netdev)) {
3616 status = be_open(netdev);
3617 if (status)
3618 goto err;
3619 }
3620 netif_device_attach(netdev);
3621 return;
3622err:
3623 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3624}
3625
3626static struct pci_error_handlers be_eeh_handlers = {
3627 .error_detected = be_eeh_err_detected,
3628 .slot_reset = be_eeh_reset,
3629 .resume = be_eeh_resume,
3630};
3631
3632static struct pci_driver be_driver = {
3633 .name = DRV_NAME,
3634 .id_table = be_dev_ids,
3635 .probe = be_probe,
3636 .remove = be_remove,
3637 .suspend = be_suspend,
3638 .resume = be_resume,
3639 .shutdown = be_shutdown,
3640 .err_handler = &be_eeh_handlers
3641};
3642
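/* Module init validates rx_frag_size before registering the PCI driver;
 * invalid values fall back to 2048. Hypothetical invocation (the module
 * name comes from DRV_NAME, assumed here to be "be2net"):
 *   modprobe be2net rx_frag_size=4096 num_vfs=4
 */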
3643static int __init be_init_module(void)
3644{
3645 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3646 rx_frag_size != 2048) {
3647 printk(KERN_WARNING DRV_NAME
3648 " : Module param rx_frag_size must be 2048/4096/8192."
3649 " Using 2048\n");
3650 rx_frag_size = 2048;
3651 }
3652
3653 return pci_register_driver(&be_driver);
3654}
3655module_init(be_init_module);
3656
3657static void __exit be_exit_module(void)
3658{
3659 pci_unregister_driver(&be_driver);
3660}
3661module_exit(be_exit_module);