drivers/net/benet/be_main.c
1 /*
2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18 #include "be.h"
19 #include "be_cmds.h"
20 #include <asm/div64.h>
21
22 MODULE_VERSION(DRV_VER);
24 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25 MODULE_AUTHOR("ServerEngines Corporation");
26 MODULE_LICENSE("GPL");
27
28 static unsigned int rx_frag_size = 2048;
29 static unsigned int num_vfs;
30 module_param(rx_frag_size, uint, S_IRUGO);
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static bool multi_rxq = true;
36 module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
37 MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { 0 }
46 };
47 MODULE_DEVICE_TABLE(pci, be_dev_ids);
48 /* UE Status Low CSR */
49 static char *ue_status_low_desc[] = {
50 "CEV",
51 "CTX",
52 "DBUF",
53 "ERX",
54 "Host",
55 "MPU",
56 "NDMA",
57 "PTC ",
58 "RDMA ",
59 "RXF ",
60 "RXIPS ",
61 "RXULP0 ",
62 "RXULP1 ",
63 "RXULP2 ",
64 "TIM ",
65 "TPOST ",
66 "TPRE ",
67 "TXIPS ",
68 "TXULP0 ",
69 "TXULP1 ",
70 "UC ",
71 "WDMA ",
72 "TXULP2 ",
73 "HOST1 ",
74 "P0_OB_LINK ",
75 "P1_OB_LINK ",
76 "HOST_GPIO ",
77 "MBOX ",
78 "AXGMAC0",
79 "AXGMAC1",
80 "JTAG",
81 "MPU_INTPEND"
82 };
83 /* UE Status High CSR */
84 static char *ue_status_hi_desc[] = {
85 "LPCMEMHOST",
86 "MGMT_MAC",
87 "PCS0ONLINE",
88 "MPU_IRAM",
89 "PCS1ONLINE",
90 "PCTL0",
91 "PCTL1",
92 "PMEM",
93 "RR",
94 "TXPB",
95 "RXPP",
96 "XAUI",
97 "TXP",
98 "ARM",
99 "IPC",
100 "HOST2",
101 "HOST3",
102 "HOST4",
103 "HOST5",
104 "HOST6",
105 "HOST7",
106 "HOST8",
107 "HOST9",
108 "NETC"
109 "Unknown",
110 "Unknown",
111 "Unknown",
112 "Unknown",
113 "Unknown",
114 "Unknown",
115 "Unknown",
116 "Unknown"
117 };
118
119 static inline bool be_multi_rxq(struct be_adapter *adapter)
120 {
121 return (adapter->num_rx_qs > 1);
122 }
123
124 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
125 {
126 struct be_dma_mem *mem = &q->dma_mem;
127 if (mem->va)
128 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
129 mem->dma);
130 }
131
132 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
133 u16 len, u16 entry_size)
134 {
135 struct be_dma_mem *mem = &q->dma_mem;
136
137 memset(q, 0, sizeof(*q));
138 q->len = len;
139 q->entry_size = entry_size;
140 mem->size = len * entry_size;
141 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
142 GFP_KERNEL);
143 if (!mem->va)
144 return -1;
145 memset(mem->va, 0, mem->size);
146 return 0;
147 }
148
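/* Enable or disable host interrupts via the MEMBAR interrupt control
 * register. The register is only rewritten when the requested state differs
 * from the current one, and the write is skipped after an EEH error.
 */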
149 static void be_intr_set(struct be_adapter *adapter, bool enable)
150 {
151 u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
152 u32 reg = ioread32(addr);
153 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
154
155 if (adapter->eeh_err)
156 return;
157
158 if (!enabled && enable)
159 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160 else if (enabled && !enable)
161 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162 else
163 return;
164
165 iowrite32(reg, addr);
166 }
167
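/* Doorbell helpers: each encodes the ring id and the number of entries
 * posted (or events/completions popped) into a single 32-bit write to the
 * adapter's doorbell BAR. The wmb() in the RQ/TXQ variants orders the
 * descriptor writes before the doorbell ring.
 */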
168 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
169 {
170 u32 val = 0;
171 val |= qid & DB_RQ_RING_ID_MASK;
172 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
173
174 wmb();
175 iowrite32(val, adapter->db + DB_RQ_OFFSET);
176 }
177
178 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179 {
180 u32 val = 0;
181 val |= qid & DB_TXULP_RING_ID_MASK;
182 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
183
184 wmb();
185 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
186 }
187
188 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
189 bool arm, bool clear_int, u16 num_popped)
190 {
191 u32 val = 0;
192 val |= qid & DB_EQ_RING_ID_MASK;
193 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
194 DB_EQ_RING_ID_EXT_MASK_SHIFT);
195
196 if (adapter->eeh_err)
197 return;
198
199 if (arm)
200 val |= 1 << DB_EQ_REARM_SHIFT;
201 if (clear_int)
202 val |= 1 << DB_EQ_CLR_SHIFT;
203 val |= 1 << DB_EQ_EVNT_SHIFT;
204 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
205 iowrite32(val, adapter->db + DB_EQ_OFFSET);
206 }
207
208 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
209 {
210 u32 val = 0;
211 val |= qid & DB_CQ_RING_ID_MASK;
212 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
213 DB_CQ_RING_ID_EXT_MASK_SHIFT);
214
215 if (adapter->eeh_err)
216 return;
217
218 if (arm)
219 val |= 1 << DB_CQ_REARM_SHIFT;
220 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
221 iowrite32(val, adapter->db + DB_CQ_OFFSET);
222 }
223
224 static int be_mac_addr_set(struct net_device *netdev, void *p)
225 {
226 struct be_adapter *adapter = netdev_priv(netdev);
227 struct sockaddr *addr = p;
228 int status = 0;
229
230 if (!is_valid_ether_addr(addr->sa_data))
231 return -EADDRNOTAVAIL;
232
233 /* MAC addr configuration will be done in hardware for VFs
234 * by their corresponding PFs. Just copy to netdev addr here
235 */
236 if (!be_physfn(adapter))
237 goto netdev_addr;
238
239 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
240 if (status)
241 return status;
242
243 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
244 adapter->if_handle, &adapter->pmac_id);
245 netdev_addr:
246 if (!status)
247 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
248
249 return status;
250 }
251
252 void netdev_stats_update(struct be_adapter *adapter)
253 {
254 struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
255 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
256 struct be_port_rxf_stats *port_stats =
257 &rxf_stats->port[adapter->port_num];
258 struct net_device_stats *dev_stats = &adapter->netdev->stats;
259 struct be_erx_stats *erx_stats = &hw_stats->erx;
260 struct be_rx_obj *rxo;
261 int i;
262
263 memset(dev_stats, 0, sizeof(*dev_stats));
264 for_all_rx_queues(adapter, rxo, i) {
265 dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
266 dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
267 dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
268 /* no space in linux buffers: best possible approximation */
269 dev_stats->rx_dropped +=
270 erx_stats->rx_drops_no_fragments[rxo->q.id];
271 }
272
273 dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
274 dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;
275
276 /* bad pkts received */
277 dev_stats->rx_errors = port_stats->rx_crc_errors +
278 port_stats->rx_alignment_symbol_errors +
279 port_stats->rx_in_range_errors +
280 port_stats->rx_out_range_errors +
281 port_stats->rx_frame_too_long +
282 port_stats->rx_dropped_too_small +
283 port_stats->rx_dropped_too_short +
284 port_stats->rx_dropped_header_too_small +
285 port_stats->rx_dropped_tcp_length +
286 port_stats->rx_dropped_runt +
287 port_stats->rx_tcp_checksum_errs +
288 port_stats->rx_ip_checksum_errs +
289 port_stats->rx_udp_checksum_errs;
290
291 /* detailed rx errors */
292 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
293 port_stats->rx_out_range_errors +
294 port_stats->rx_frame_too_long;
295
296 dev_stats->rx_crc_errors = port_stats->rx_crc_errors;
297
298 /* frame alignment errors */
299 dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;
300
301 /* receiver fifo overrun */
302         /* drops_no_pbuf is not per i/f, it's per BE card */
303 dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
304 port_stats->rx_input_fifo_overflow +
305 rxf_stats->rx_drops_no_pbuf;
306 }
307
308 void be_link_status_update(struct be_adapter *adapter, bool link_up)
309 {
310 struct net_device *netdev = adapter->netdev;
311
312 /* If link came up or went down */
313 if (adapter->link_up != link_up) {
314 adapter->link_speed = -1;
315 if (link_up) {
316 netif_carrier_on(netdev);
317 printk(KERN_INFO "%s: Link up\n", netdev->name);
318 } else {
319 netif_carrier_off(netdev);
320 printk(KERN_INFO "%s: Link down\n", netdev->name);
321 }
322 adapter->link_up = link_up;
323 }
324 }
325
326 /* Update the EQ delay in BE based on the RX frags consumed per second */
327 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
328 {
329 struct be_eq_obj *rx_eq = &rxo->rx_eq;
330 struct be_rx_stats *stats = &rxo->stats;
331 ulong now = jiffies;
332 u32 eqd;
333
334 if (!rx_eq->enable_aic)
335 return;
336
337 /* Wrapped around */
338 if (time_before(now, stats->rx_fps_jiffies)) {
339 stats->rx_fps_jiffies = now;
340 return;
341 }
342
343 /* Update once a second */
344 if ((now - stats->rx_fps_jiffies) < HZ)
345 return;
346
347 stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
348 ((now - stats->rx_fps_jiffies) / HZ);
349
350 stats->rx_fps_jiffies = now;
351 stats->prev_rx_frags = stats->rx_frags;
352 eqd = stats->rx_fps / 110000;
353 eqd = eqd << 3;
354 if (eqd > rx_eq->max_eqd)
355 eqd = rx_eq->max_eqd;
356 if (eqd < rx_eq->min_eqd)
357 eqd = rx_eq->min_eqd;
358 if (eqd < 10)
359 eqd = 0;
360 if (eqd != rx_eq->cur_eqd)
361 be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
362
363 rx_eq->cur_eqd = eqd;
364 }
365
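/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec.
 * e.g. 250,000,000 bytes over 2*HZ ticks -> 125,000,000 bytes/sec
 * -> 1,000,000,000 bits/sec -> 1000 Mbits/sec.
 */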
366 static u32 be_calc_rate(u64 bytes, unsigned long ticks)
367 {
368 u64 rate = bytes;
369
370 do_div(rate, ticks / HZ);
371 rate <<= 3; /* bytes/sec -> bits/sec */
372         do_div(rate, 1000000ul);  /* Mbits/sec */
373
374 return rate;
375 }
376
377 static void be_tx_rate_update(struct be_adapter *adapter)
378 {
379 struct be_tx_stats *stats = tx_stats(adapter);
380 ulong now = jiffies;
381
382 /* Wrapped around? */
383 if (time_before(now, stats->be_tx_jiffies)) {
384 stats->be_tx_jiffies = now;
385 return;
386 }
387
388 /* Update tx rate once in two seconds */
389 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
390 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
391 - stats->be_tx_bytes_prev,
392 now - stats->be_tx_jiffies);
393 stats->be_tx_jiffies = now;
394 stats->be_tx_bytes_prev = stats->be_tx_bytes;
395 }
396 }
397
398 static void be_tx_stats_update(struct be_adapter *adapter,
399 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
400 {
401 struct be_tx_stats *stats = tx_stats(adapter);
402 stats->be_tx_reqs++;
403 stats->be_tx_wrbs += wrb_cnt;
404 stats->be_tx_bytes += copied;
405 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
406 if (stopped)
407 stats->be_tx_stops++;
408 }
409
410 /* Determine number of WRB entries needed to xmit data in an skb */
411 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
412 bool *dummy)
413 {
414 int cnt = (skb->len > skb->data_len);
415
416 cnt += skb_shinfo(skb)->nr_frags;
417
418 /* to account for hdr wrb */
419 cnt++;
420 if (lancer_chip(adapter) || !(cnt & 1)) {
421 *dummy = false;
422 } else {
423 /* add a dummy to make it an even num */
424 cnt++;
425 *dummy = true;
426 }
427 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
428 return cnt;
429 }
430
431 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
432 {
433 wrb->frag_pa_hi = upper_32_bits(addr);
434 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
435 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
436 }
437
438 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
439 struct sk_buff *skb, u32 wrb_cnt, u32 len)
440 {
441 u8 vlan_prio = 0;
442 u16 vlan_tag = 0;
443
444 memset(hdr, 0, sizeof(*hdr));
445
446 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
447
448 if (skb_is_gso(skb)) {
449 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
451 hdr, skb_shinfo(skb)->gso_size);
452 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
453 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
454 if (lancer_chip(adapter) && adapter->sli_family ==
455 LANCER_A0_SLI_FAMILY) {
456 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
457 if (is_tcp_pkt(skb))
458 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
459 tcpcs, hdr, 1);
460 else if (is_udp_pkt(skb))
461 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
462 udpcs, hdr, 1);
463 }
464 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
465 if (is_tcp_pkt(skb))
466 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
467 else if (is_udp_pkt(skb))
468 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
469 }
470
471 if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
472 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
473 vlan_tag = vlan_tx_tag_get(skb);
474 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
475 /* If vlan priority provided by OS is NOT in available bmap */
476 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
477 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
478 adapter->recommended_prio;
479 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
480 }
481
482 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
483 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
484 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
485 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
486 }
487
488 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
489 bool unmap_single)
490 {
491 dma_addr_t dma;
492
493 be_dws_le_to_cpu(wrb, sizeof(*wrb));
494
495 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
496 if (wrb->frag_len) {
497 if (unmap_single)
498 dma_unmap_single(dev, dma, wrb->frag_len,
499 DMA_TO_DEVICE);
500 else
501 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
502 }
503 }
504
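/* DMA-map the skb header and each page fragment and fill one WRB per
 * mapping, in addition to the leading header WRB and an optional dummy WRB
 * that keeps the WRB count even. Returns the number of bytes queued, or 0
 * after unwinding all mappings if a DMA mapping fails.
 */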
505 static int make_tx_wrbs(struct be_adapter *adapter,
506 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
507 {
508 dma_addr_t busaddr;
509 int i, copied = 0;
510 struct device *dev = &adapter->pdev->dev;
511 struct sk_buff *first_skb = skb;
512 struct be_queue_info *txq = &adapter->tx_obj.q;
513 struct be_eth_wrb *wrb;
514 struct be_eth_hdr_wrb *hdr;
515 bool map_single = false;
516 u16 map_head;
517
518 hdr = queue_head_node(txq);
519 queue_head_inc(txq);
520 map_head = txq->head;
521
522 if (skb->len > skb->data_len) {
523 int len = skb_headlen(skb);
524 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
525 if (dma_mapping_error(dev, busaddr))
526 goto dma_err;
527 map_single = true;
528 wrb = queue_head_node(txq);
529 wrb_fill(wrb, busaddr, len);
530 be_dws_cpu_to_le(wrb, sizeof(*wrb));
531 queue_head_inc(txq);
532 copied += len;
533 }
534
535 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
536 struct skb_frag_struct *frag =
537 &skb_shinfo(skb)->frags[i];
538 busaddr = dma_map_page(dev, frag->page, frag->page_offset,
539 frag->size, DMA_TO_DEVICE);
540 if (dma_mapping_error(dev, busaddr))
541 goto dma_err;
542 wrb = queue_head_node(txq);
543 wrb_fill(wrb, busaddr, frag->size);
544 be_dws_cpu_to_le(wrb, sizeof(*wrb));
545 queue_head_inc(txq);
546 copied += frag->size;
547 }
548
549 if (dummy_wrb) {
550 wrb = queue_head_node(txq);
551 wrb_fill(wrb, 0, 0);
552 be_dws_cpu_to_le(wrb, sizeof(*wrb));
553 queue_head_inc(txq);
554 }
555
556 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
557 be_dws_cpu_to_le(hdr, sizeof(*hdr));
558
559 return copied;
560 dma_err:
561 txq->head = map_head;
562 while (copied) {
563 wrb = queue_head_node(txq);
564 unmap_tx_frag(dev, wrb, map_single);
565 map_single = false;
566 copied -= wrb->frag_len;
567 queue_head_inc(txq);
568 }
569 return 0;
570 }
571
572 static netdev_tx_t be_xmit(struct sk_buff *skb,
573 struct net_device *netdev)
574 {
575 struct be_adapter *adapter = netdev_priv(netdev);
576 struct be_tx_obj *tx_obj = &adapter->tx_obj;
577 struct be_queue_info *txq = &tx_obj->q;
578 u32 wrb_cnt = 0, copied = 0;
579 u32 start = txq->head;
580 bool dummy_wrb, stopped = false;
581
582 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
583
584 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
585 if (copied) {
586 /* record the sent skb in the sent_skb table */
587 BUG_ON(tx_obj->sent_skb_list[start]);
588 tx_obj->sent_skb_list[start] = skb;
589
590 /* Ensure txq has space for the next skb; Else stop the queue
591                  * *BEFORE* ringing the tx doorbell, so that we serialize the
592 * tx compls of the current transmit which'll wake up the queue
593 */
594 atomic_add(wrb_cnt, &txq->used);
595 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
596 txq->len) {
597 netif_stop_queue(netdev);
598 stopped = true;
599 }
600
601 be_txq_notify(adapter, txq->id, wrb_cnt);
602
603 be_tx_stats_update(adapter, wrb_cnt, copied,
604 skb_shinfo(skb)->gso_segs, stopped);
605 } else {
606 txq->head = start;
607 dev_kfree_skb_any(skb);
608 }
609 return NETDEV_TX_OK;
610 }
611
612 static int be_change_mtu(struct net_device *netdev, int new_mtu)
613 {
614 struct be_adapter *adapter = netdev_priv(netdev);
615 if (new_mtu < BE_MIN_MTU ||
616 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
617 (ETH_HLEN + ETH_FCS_LEN))) {
618 dev_info(&adapter->pdev->dev,
619 "MTU must be between %d and %d bytes\n",
620 BE_MIN_MTU,
621 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
622 return -EINVAL;
623 }
624 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
625 netdev->mtu, new_mtu);
626 netdev->mtu = new_mtu;
627 return 0;
628 }
629
630 /*
631 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
632 * If the user configures more, place BE in vlan promiscuous mode.
633 */
634 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
635 {
636 u16 vtag[BE_NUM_VLANS_SUPPORTED];
637 u16 ntags = 0, i;
638 int status = 0;
639 u32 if_handle;
640
641 if (vf) {
642 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
643 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
644 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
645 }
646
647 if (adapter->vlans_added <= adapter->max_vlans) {
648 /* Construct VLAN Table to give to HW */
649 for (i = 0; i < VLAN_N_VID; i++) {
650 if (adapter->vlan_tag[i]) {
651 vtag[ntags] = cpu_to_le16(i);
652 ntags++;
653 }
654 }
655 status = be_cmd_vlan_config(adapter, adapter->if_handle,
656 vtag, ntags, 1, 0);
657 } else {
658 status = be_cmd_vlan_config(adapter, adapter->if_handle,
659 NULL, 0, 1, 1);
660 }
661
662 return status;
663 }
664
665 static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
666 {
667 struct be_adapter *adapter = netdev_priv(netdev);
668
669 adapter->vlan_grp = grp;
670 }
671
672 static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
673 {
674 struct be_adapter *adapter = netdev_priv(netdev);
675
676 adapter->vlans_added++;
677 if (!be_physfn(adapter))
678 return;
679
680 adapter->vlan_tag[vid] = 1;
681 if (adapter->vlans_added <= (adapter->max_vlans + 1))
682 be_vid_config(adapter, false, 0);
683 }
684
685 static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
686 {
687 struct be_adapter *adapter = netdev_priv(netdev);
688
689 adapter->vlans_added--;
690 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
691
692 if (!be_physfn(adapter))
693 return;
694
695 adapter->vlan_tag[vid] = 0;
696 if (adapter->vlans_added <= adapter->max_vlans)
697 be_vid_config(adapter, false, 0);
698 }
699
700 static void be_set_multicast_list(struct net_device *netdev)
701 {
702 struct be_adapter *adapter = netdev_priv(netdev);
703
704 if (netdev->flags & IFF_PROMISC) {
705 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
706 adapter->promiscuous = true;
707 goto done;
708 }
709
710         /* BE was previously in promiscuous mode; disable it */
711 if (adapter->promiscuous) {
712 adapter->promiscuous = false;
713 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
714 }
715
716 /* Enable multicast promisc if num configured exceeds what we support */
717 if (netdev->flags & IFF_ALLMULTI ||
718 netdev_mc_count(netdev) > BE_MAX_MC) {
719 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
720 &adapter->mc_cmd_mem);
721 goto done;
722 }
723
724 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
725 &adapter->mc_cmd_mem);
726 done:
727 return;
728 }
729
730 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
731 {
732 struct be_adapter *adapter = netdev_priv(netdev);
733 int status;
734
735 if (!adapter->sriov_enabled)
736 return -EPERM;
737
738 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
739 return -EINVAL;
740
741 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
742 status = be_cmd_pmac_del(adapter,
743 adapter->vf_cfg[vf].vf_if_handle,
744 adapter->vf_cfg[vf].vf_pmac_id);
745
746 status = be_cmd_pmac_add(adapter, mac,
747 adapter->vf_cfg[vf].vf_if_handle,
748 &adapter->vf_cfg[vf].vf_pmac_id);
749
750 if (status)
751                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
752 mac, vf);
753 else
754 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
755
756 return status;
757 }
758
759 static int be_get_vf_config(struct net_device *netdev, int vf,
760 struct ifla_vf_info *vi)
761 {
762 struct be_adapter *adapter = netdev_priv(netdev);
763
764 if (!adapter->sriov_enabled)
765 return -EPERM;
766
767 if (vf >= num_vfs)
768 return -EINVAL;
769
770 vi->vf = vf;
771 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
772 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
773 vi->qos = 0;
774 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
775
776 return 0;
777 }
778
779 static int be_set_vf_vlan(struct net_device *netdev,
780 int vf, u16 vlan, u8 qos)
781 {
782 struct be_adapter *adapter = netdev_priv(netdev);
783 int status = 0;
784
785 if (!adapter->sriov_enabled)
786 return -EPERM;
787
788 if ((vf >= num_vfs) || (vlan > 4095))
789 return -EINVAL;
790
791 if (vlan) {
792 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
793 adapter->vlans_added++;
794 } else {
795 adapter->vf_cfg[vf].vf_vlan_tag = 0;
796 adapter->vlans_added--;
797 }
798
799 status = be_vid_config(adapter, true, vf);
800
801 if (status)
802 dev_info(&adapter->pdev->dev,
803 "VLAN %d config on VF %d failed\n", vlan, vf);
804 return status;
805 }
806
807 static int be_set_vf_tx_rate(struct net_device *netdev,
808 int vf, int rate)
809 {
810 struct be_adapter *adapter = netdev_priv(netdev);
811 int status = 0;
812
813 if (!adapter->sriov_enabled)
814 return -EPERM;
815
816 if ((vf >= num_vfs) || (rate < 0))
817 return -EINVAL;
818
819 if (rate > 10000)
820 rate = 10000;
821
822 adapter->vf_cfg[vf].vf_tx_rate = rate;
823 status = be_cmd_set_qos(adapter, rate / 10, vf);
824
825 if (status)
826 dev_info(&adapter->pdev->dev,
827 "tx rate %d on VF %d failed\n", rate, vf);
828 return status;
829 }
830
831 static void be_rx_rate_update(struct be_rx_obj *rxo)
832 {
833 struct be_rx_stats *stats = &rxo->stats;
834 ulong now = jiffies;
835
836 /* Wrapped around */
837 if (time_before(now, stats->rx_jiffies)) {
838 stats->rx_jiffies = now;
839 return;
840 }
841
842 /* Update the rate once in two seconds */
843 if ((now - stats->rx_jiffies) < 2 * HZ)
844 return;
845
846 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
847 now - stats->rx_jiffies);
848 stats->rx_jiffies = now;
849 stats->rx_bytes_prev = stats->rx_bytes;
850 }
851
852 static void be_rx_stats_update(struct be_rx_obj *rxo,
853 u32 pktsize, u16 numfrags, u8 pkt_type)
854 {
855 struct be_rx_stats *stats = &rxo->stats;
856
857 stats->rx_compl++;
858 stats->rx_frags += numfrags;
859 stats->rx_bytes += pktsize;
860 stats->rx_pkts++;
861 if (pkt_type == BE_MULTICAST_PACKET)
862 stats->rx_mcast_pkts++;
863 }
864
865 static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
866 {
867 u8 l4_cksm, ipv6, ipcksm;
868
869 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
870 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
871 ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
872
873 /* Ignore ipcksm for ipv6 pkts */
874 return l4_cksm && (ipcksm || ipv6);
875 }
876
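/* Look up the page backing RX fragment 'frag_idx'. The page is DMA-unmapped
 * only when this fragment is the last user of the (possibly compound) page.
 */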
877 static struct be_rx_page_info *
878 get_rx_page_info(struct be_adapter *adapter,
879 struct be_rx_obj *rxo,
880 u16 frag_idx)
881 {
882 struct be_rx_page_info *rx_page_info;
883 struct be_queue_info *rxq = &rxo->q;
884
885 rx_page_info = &rxo->page_info_tbl[frag_idx];
886 BUG_ON(!rx_page_info->page);
887
888 if (rx_page_info->last_page_user) {
889 dma_unmap_page(&adapter->pdev->dev,
890 dma_unmap_addr(rx_page_info, bus),
891 adapter->big_page_size, DMA_FROM_DEVICE);
892 rx_page_info->last_page_user = false;
893 }
894
895 atomic_dec(&rxq->used);
896 return rx_page_info;
897 }
898
899 /* Throw away the data in the Rx completion */
900 static void be_rx_compl_discard(struct be_adapter *adapter,
901 struct be_rx_obj *rxo,
902 struct be_eth_rx_compl *rxcp)
903 {
904 struct be_queue_info *rxq = &rxo->q;
905 struct be_rx_page_info *page_info;
906 u16 rxq_idx, i, num_rcvd;
907
908 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
909 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
910
911 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
912 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
913
914 rxo->last_frag_index = rxq_idx;
915
916 for (i = 0; i < num_rcvd; i++) {
917 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
918 put_page(page_info->page);
919 memset(page_info, 0, sizeof(*page_info));
920 index_inc(&rxq_idx, rxq->len);
921 }
922 }
923 }
924
925 /*
926 * skb_fill_rx_data forms a complete skb for an ether frame
927 * indicated by rxcp.
928 */
929 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
930 struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
931 u16 num_rcvd)
932 {
933 struct be_queue_info *rxq = &rxo->q;
934 struct be_rx_page_info *page_info;
935 u16 rxq_idx, i, j;
936 u32 pktsize, hdr_len, curr_frag_len, size;
937 u8 *start;
938 u8 pkt_type;
939
940 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
941 pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
942 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
943
944 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
945
946 start = page_address(page_info->page) + page_info->page_offset;
947 prefetch(start);
948
949 /* Copy data in the first descriptor of this completion */
950 curr_frag_len = min(pktsize, rx_frag_size);
951
952 /* Copy the header portion into skb_data */
953 hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
954 memcpy(skb->data, start, hdr_len);
955 skb->len = curr_frag_len;
956 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
957 /* Complete packet has now been moved to data */
958 put_page(page_info->page);
959 skb->data_len = 0;
960 skb->tail += curr_frag_len;
961 } else {
962 skb_shinfo(skb)->nr_frags = 1;
963 skb_shinfo(skb)->frags[0].page = page_info->page;
964 skb_shinfo(skb)->frags[0].page_offset =
965 page_info->page_offset + hdr_len;
966 skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
967 skb->data_len = curr_frag_len - hdr_len;
968 skb->tail += hdr_len;
969 }
970 page_info->page = NULL;
971
972 if (pktsize <= rx_frag_size) {
973 BUG_ON(num_rcvd != 1);
974 goto done;
975 }
976
977 /* More frags present for this completion */
978 size = pktsize;
979 for (i = 1, j = 0; i < num_rcvd; i++) {
980 size -= curr_frag_len;
981 index_inc(&rxq_idx, rxq->len);
982 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
983
984 curr_frag_len = min(size, rx_frag_size);
985
986 /* Coalesce all frags from the same physical page in one slot */
987 if (page_info->page_offset == 0) {
988 /* Fresh page */
989 j++;
990 skb_shinfo(skb)->frags[j].page = page_info->page;
991 skb_shinfo(skb)->frags[j].page_offset =
992 page_info->page_offset;
993 skb_shinfo(skb)->frags[j].size = 0;
994 skb_shinfo(skb)->nr_frags++;
995 } else {
996 put_page(page_info->page);
997 }
998
999 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1000 skb->len += curr_frag_len;
1001 skb->data_len += curr_frag_len;
1002
1003 page_info->page = NULL;
1004 }
1005 BUG_ON(j > MAX_SKB_FRAGS);
1006
1007 done:
1008 be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
1009 }
1010
1011 /* Process the RX completion indicated by rxcp when GRO is disabled */
1012 static void be_rx_compl_process(struct be_adapter *adapter,
1013 struct be_rx_obj *rxo,
1014 struct be_eth_rx_compl *rxcp)
1015 {
1016 struct sk_buff *skb;
1017 u32 vlanf, vid;
1018 u16 num_rcvd;
1019 u8 vtm;
1020
1021 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1022
1023 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1024 if (unlikely(!skb)) {
1025 if (net_ratelimit())
1026 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
1027 be_rx_compl_discard(adapter, rxo, rxcp);
1028 return;
1029 }
1030
1031 skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
1032
1033 if (likely(adapter->rx_csum && csum_passed(rxcp)))
1034 skb->ip_summed = CHECKSUM_UNNECESSARY;
1035 else
1036 skb_checksum_none_assert(skb);
1037
1038 skb->truesize = skb->len + sizeof(struct sk_buff);
1039 skb->protocol = eth_type_trans(skb, adapter->netdev);
1040
1041 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1042 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1043
1044 /* vlanf could be wrongly set in some cards.
1045 * ignore if vtm is not set */
1046 if ((adapter->function_mode & 0x400) && !vtm)
1047 vlanf = 0;
1048
1049 if (unlikely(vlanf)) {
1050 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
1051 kfree_skb(skb);
1052 return;
1053 }
1054 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1055 if (!lancer_chip(adapter))
1056 vid = swab16(vid);
1057 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1058 } else {
1059 netif_receive_skb(skb);
1060 }
1061 }
1062
1063 /* Process the RX completion indicated by rxcp when GRO is enabled */
1064 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1065 struct be_rx_obj *rxo,
1066 struct be_eth_rx_compl *rxcp)
1067 {
1068 struct be_rx_page_info *page_info;
1069 struct sk_buff *skb = NULL;
1070 struct be_queue_info *rxq = &rxo->q;
1071 struct be_eq_obj *eq_obj = &rxo->rx_eq;
1072 u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
1073 u16 i, rxq_idx = 0, vid, j;
1074 u8 vtm;
1075 u8 pkt_type;
1076
1077 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1078 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1079 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1080 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
1081 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
1082 pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);
1083
1084 /* vlanf could be wrongly set in some cards.
1085 * ignore if vtm is not set */
1086 if ((adapter->function_mode & 0x400) && !vtm)
1087 vlanf = 0;
1088
1089 skb = napi_get_frags(&eq_obj->napi);
1090 if (!skb) {
1091 be_rx_compl_discard(adapter, rxo, rxcp);
1092 return;
1093 }
1094
1095 remaining = pkt_size;
1096 for (i = 0, j = -1; i < num_rcvd; i++) {
1097 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
1098
1099 curr_frag_len = min(remaining, rx_frag_size);
1100
1101 /* Coalesce all frags from the same physical page in one slot */
1102 if (i == 0 || page_info->page_offset == 0) {
1103 /* First frag or Fresh page */
1104 j++;
1105 skb_shinfo(skb)->frags[j].page = page_info->page;
1106 skb_shinfo(skb)->frags[j].page_offset =
1107 page_info->page_offset;
1108 skb_shinfo(skb)->frags[j].size = 0;
1109 } else {
1110 put_page(page_info->page);
1111 }
1112 skb_shinfo(skb)->frags[j].size += curr_frag_len;
1113
1114 remaining -= curr_frag_len;
1115 index_inc(&rxq_idx, rxq->len);
1116 memset(page_info, 0, sizeof(*page_info));
1117 }
1118 BUG_ON(j > MAX_SKB_FRAGS);
1119
1120 skb_shinfo(skb)->nr_frags = j + 1;
1121 skb->len = pkt_size;
1122 skb->data_len = pkt_size;
1123 skb->truesize += pkt_size;
1124 skb->ip_summed = CHECKSUM_UNNECESSARY;
1125
1126 if (likely(!vlanf)) {
1127 napi_gro_frags(&eq_obj->napi);
1128 } else {
1129 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1130 if (!lancer_chip(adapter))
1131 vid = swab16(vid);
1132
1133 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1134 return;
1135
1136 vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
1137 }
1138
1139 be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
1140 }
1141
1142 static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
1143 {
1144 struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);
1145
1146 if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
1147 return NULL;
1148
1149 rmb();
1150 be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
1151
1152 queue_tail_inc(&rxo->cq);
1153 return rxcp;
1154 }
1155
1156 /* To reset the valid bit, we need to reset the whole word as
1157 * when walking the queue the valid entries are little-endian
1158 * and invalid entries are host endian
1159 */
1160 static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
1161 {
1162 rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
1163 }
1164
1165 static inline struct page *be_alloc_pages(u32 size)
1166 {
1167 gfp_t alloc_flags = GFP_ATOMIC;
1168 u32 order = get_order(size);
1169 if (order > 0)
1170 alloc_flags |= __GFP_COMP;
1171 return alloc_pages(alloc_flags, order);
1172 }
1173
1174 /*
1175 * Allocate a page, split it to fragments of size rx_frag_size and post as
1176 * receive buffers to BE
1177 */
1178 static void be_post_rx_frags(struct be_rx_obj *rxo)
1179 {
1180 struct be_adapter *adapter = rxo->adapter;
1181 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1182 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1183 struct be_queue_info *rxq = &rxo->q;
1184 struct page *pagep = NULL;
1185 struct be_eth_rx_d *rxd;
1186 u64 page_dmaaddr = 0, frag_dmaaddr;
1187 u32 posted, page_offset = 0;
1188
1189 page_info = &rxo->page_info_tbl[rxq->head];
1190 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1191 if (!pagep) {
1192 pagep = be_alloc_pages(adapter->big_page_size);
1193 if (unlikely(!pagep)) {
1194 rxo->stats.rx_post_fail++;
1195 break;
1196 }
1197 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1198 0, adapter->big_page_size,
1199 DMA_FROM_DEVICE);
1200 page_info->page_offset = 0;
1201 } else {
1202 get_page(pagep);
1203 page_info->page_offset = page_offset + rx_frag_size;
1204 }
1205 page_offset = page_info->page_offset;
1206 page_info->page = pagep;
1207 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1208 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1209
1210 rxd = queue_head_node(rxq);
1211 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1212 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1213
1214 /* Any space left in the current big page for another frag? */
1215 if ((page_offset + rx_frag_size + rx_frag_size) >
1216 adapter->big_page_size) {
1217 pagep = NULL;
1218 page_info->last_page_user = true;
1219 }
1220
1221 prev_page_info = page_info;
1222 queue_head_inc(rxq);
1223 page_info = &page_info_tbl[rxq->head];
1224 }
1225 if (pagep)
1226 prev_page_info->last_page_user = true;
1227
1228 if (posted) {
1229 atomic_add(posted, &rxq->used);
1230 be_rxq_notify(adapter, rxq->id, posted);
1231 } else if (atomic_read(&rxq->used) == 0) {
1232 /* Let be_worker replenish when memory is available */
1233 rxo->rx_post_starved = true;
1234 }
1235 }
1236
1237 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1238 {
1239 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1240
1241 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1242 return NULL;
1243
1244 rmb();
1245 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1246
1247 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1248
1249 queue_tail_inc(tx_cq);
1250 return txcp;
1251 }
1252
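/* Reclaim the WRBs of one transmitted skb: walk from the header WRB up to
 * 'last_index', unmapping each fragment, then free the skb and release the
 * consumed queue entries.
 */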
1253 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1254 {
1255 struct be_queue_info *txq = &adapter->tx_obj.q;
1256 struct be_eth_wrb *wrb;
1257 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1258 struct sk_buff *sent_skb;
1259 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1260 bool unmap_skb_hdr = true;
1261
1262 sent_skb = sent_skbs[txq->tail];
1263 BUG_ON(!sent_skb);
1264 sent_skbs[txq->tail] = NULL;
1265
1266 /* skip header wrb */
1267 queue_tail_inc(txq);
1268
1269 do {
1270 cur_index = txq->tail;
1271 wrb = queue_tail_node(txq);
1272 unmap_tx_frag(&adapter->pdev->dev, wrb,
1273 (unmap_skb_hdr && skb_headlen(sent_skb)));
1274 unmap_skb_hdr = false;
1275
1276 num_wrbs++;
1277 queue_tail_inc(txq);
1278 } while (cur_index != last_index);
1279
1280 atomic_sub(num_wrbs, &txq->used);
1281
1282 kfree_skb(sent_skb);
1283 }
1284
1285 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1286 {
1287 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1288
1289 if (!eqe->evt)
1290 return NULL;
1291
1292 rmb();
1293 eqe->evt = le32_to_cpu(eqe->evt);
1294 queue_tail_inc(&eq_obj->q);
1295 return eqe;
1296 }
1297
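/* Drain all pending entries from an event queue, re-arm it via the EQ
 * doorbell and, if any events were found, schedule the NAPI handler.
 */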
1298 static int event_handle(struct be_adapter *adapter,
1299 struct be_eq_obj *eq_obj)
1300 {
1301 struct be_eq_entry *eqe;
1302 u16 num = 0;
1303
1304 while ((eqe = event_get(eq_obj)) != NULL) {
1305 eqe->evt = 0;
1306 num++;
1307 }
1308
1309 /* Deal with any spurious interrupts that come
1310 * without events
1311 */
1312 be_eq_notify(adapter, eq_obj->q.id, true, true, num);
1313 if (num)
1314 napi_schedule(&eq_obj->napi);
1315
1316 return num;
1317 }
1318
1319 /* Just read and notify events without processing them.
1320 * Used at the time of destroying event queues */
1321 static void be_eq_clean(struct be_adapter *adapter,
1322 struct be_eq_obj *eq_obj)
1323 {
1324 struct be_eq_entry *eqe;
1325 u16 num = 0;
1326
1327 while ((eqe = event_get(eq_obj)) != NULL) {
1328 eqe->evt = 0;
1329 num++;
1330 }
1331
1332 if (num)
1333 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1334 }
1335
1336 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1337 {
1338 struct be_rx_page_info *page_info;
1339 struct be_queue_info *rxq = &rxo->q;
1340 struct be_queue_info *rx_cq = &rxo->cq;
1341 struct be_eth_rx_compl *rxcp;
1342 u16 tail;
1343
1344 /* First cleanup pending rx completions */
1345 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1346 be_rx_compl_discard(adapter, rxo, rxcp);
1347 be_rx_compl_reset(rxcp);
1348 be_cq_notify(adapter, rx_cq->id, false, 1);
1349 }
1350
1351         /* Then free posted rx buffers that were not used */
1352 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1353 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1354 page_info = get_rx_page_info(adapter, rxo, tail);
1355 put_page(page_info->page);
1356 memset(page_info, 0, sizeof(*page_info));
1357 }
1358 BUG_ON(atomic_read(&rxq->used));
1359 }
1360
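/* Called on queue teardown: reap TX completions for up to ~200ms, then
 * forcibly free any posted skbs whose completions never arrived.
 */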
1361 static void be_tx_compl_clean(struct be_adapter *adapter)
1362 {
1363 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1364 struct be_queue_info *txq = &adapter->tx_obj.q;
1365 struct be_eth_tx_compl *txcp;
1366 u16 end_idx, cmpl = 0, timeo = 0;
1367 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1368 struct sk_buff *sent_skb;
1369 bool dummy_wrb;
1370
1371 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1372 do {
1373 while ((txcp = be_tx_compl_get(tx_cq))) {
1374 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1375 wrb_index, txcp);
1376 be_tx_compl_process(adapter, end_idx);
1377 cmpl++;
1378 }
1379 if (cmpl) {
1380 be_cq_notify(adapter, tx_cq->id, false, cmpl);
1381 cmpl = 0;
1382 }
1383
1384 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1385 break;
1386
1387 mdelay(1);
1388 } while (true);
1389
1390 if (atomic_read(&txq->used))
1391 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1392 atomic_read(&txq->used));
1393
1394 /* free posted tx for which compls will never arrive */
1395 while (atomic_read(&txq->used)) {
1396 sent_skb = sent_skbs[txq->tail];
1397 end_idx = txq->tail;
1398 index_adv(&end_idx,
1399 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1400 txq->len);
1401 be_tx_compl_process(adapter, end_idx);
1402 }
1403 }
1404
1405 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1406 {
1407 struct be_queue_info *q;
1408
1409 q = &adapter->mcc_obj.q;
1410 if (q->created)
1411 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1412 be_queue_free(adapter, q);
1413
1414 q = &adapter->mcc_obj.cq;
1415 if (q->created)
1416 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1417 be_queue_free(adapter, q);
1418 }
1419
1420 /* Must be called only after TX qs are created as MCC shares TX EQ */
1421 static int be_mcc_queues_create(struct be_adapter *adapter)
1422 {
1423 struct be_queue_info *q, *cq;
1424
1425 /* Alloc MCC compl queue */
1426 cq = &adapter->mcc_obj.cq;
1427 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1428 sizeof(struct be_mcc_compl)))
1429 goto err;
1430
1431 /* Ask BE to create MCC compl queue; share TX's eq */
1432 if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1433 goto mcc_cq_free;
1434
1435 /* Alloc MCC queue */
1436 q = &adapter->mcc_obj.q;
1437 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1438 goto mcc_cq_destroy;
1439
1440 /* Ask BE to create MCC queue */
1441 if (be_cmd_mccq_create(adapter, q, cq))
1442 goto mcc_q_free;
1443
1444 return 0;
1445
1446 mcc_q_free:
1447 be_queue_free(adapter, q);
1448 mcc_cq_destroy:
1449 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1450 mcc_cq_free:
1451 be_queue_free(adapter, cq);
1452 err:
1453 return -1;
1454 }
1455
1456 static void be_tx_queues_destroy(struct be_adapter *adapter)
1457 {
1458 struct be_queue_info *q;
1459
1460 q = &adapter->tx_obj.q;
1461 if (q->created)
1462 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1463 be_queue_free(adapter, q);
1464
1465 q = &adapter->tx_obj.cq;
1466 if (q->created)
1467 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1468 be_queue_free(adapter, q);
1469
1470 /* Clear any residual events */
1471 be_eq_clean(adapter, &adapter->tx_eq);
1472
1473 q = &adapter->tx_eq.q;
1474 if (q->created)
1475 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1476 be_queue_free(adapter, q);
1477 }
1478
1479 static int be_tx_queues_create(struct be_adapter *adapter)
1480 {
1481 struct be_queue_info *eq, *q, *cq;
1482
1483 adapter->tx_eq.max_eqd = 0;
1484 adapter->tx_eq.min_eqd = 0;
1485 adapter->tx_eq.cur_eqd = 96;
1486 adapter->tx_eq.enable_aic = false;
1487 /* Alloc Tx Event queue */
1488 eq = &adapter->tx_eq.q;
1489 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
1490 return -1;
1491
1492 /* Ask BE to create Tx Event queue */
1493 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1494 goto tx_eq_free;
1495
1496 adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1497
1498
1499 /* Alloc TX eth compl queue */
1500 cq = &adapter->tx_obj.cq;
1501 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1502 sizeof(struct be_eth_tx_compl)))
1503 goto tx_eq_destroy;
1504
1505 /* Ask BE to create Tx eth compl queue */
1506 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1507 goto tx_cq_free;
1508
1509 /* Alloc TX eth queue */
1510 q = &adapter->tx_obj.q;
1511 if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
1512 goto tx_cq_destroy;
1513
1514 /* Ask BE to create Tx eth queue */
1515 if (be_cmd_txq_create(adapter, q, cq))
1516 goto tx_q_free;
1517 return 0;
1518
1519 tx_q_free:
1520 be_queue_free(adapter, q);
1521 tx_cq_destroy:
1522 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1523 tx_cq_free:
1524 be_queue_free(adapter, cq);
1525 tx_eq_destroy:
1526 be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
1527 tx_eq_free:
1528 be_queue_free(adapter, eq);
1529 return -1;
1530 }
1531
1532 static void be_rx_queues_destroy(struct be_adapter *adapter)
1533 {
1534 struct be_queue_info *q;
1535 struct be_rx_obj *rxo;
1536 int i;
1537
1538 for_all_rx_queues(adapter, rxo, i) {
1539 q = &rxo->q;
1540 if (q->created) {
1541 be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
1542 /* After the rxq is invalidated, wait for a grace time
1543 * of 1ms for all dma to end and the flush compl to
1544 * arrive
1545 */
1546 mdelay(1);
1547 be_rx_q_clean(adapter, rxo);
1548 }
1549 be_queue_free(adapter, q);
1550
1551 q = &rxo->cq;
1552 if (q->created)
1553 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1554 be_queue_free(adapter, q);
1555
1556 /* Clear any residual events */
1557 q = &rxo->rx_eq.q;
1558 if (q->created) {
1559 be_eq_clean(adapter, &rxo->rx_eq);
1560 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1561 }
1562 be_queue_free(adapter, q);
1563 }
1564 }
1565
1566 static int be_rx_queues_create(struct be_adapter *adapter)
1567 {
1568 struct be_queue_info *eq, *q, *cq;
1569 struct be_rx_obj *rxo;
1570 int rc, i;
1571
1572 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1573 for_all_rx_queues(adapter, rxo, i) {
1574 rxo->adapter = adapter;
1575 /* Init last_frag_index so that the frag index in the first
1576 * completion will never match */
1577 rxo->last_frag_index = 0xffff;
1578 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1579 rxo->rx_eq.enable_aic = true;
1580
1581 /* EQ */
1582 eq = &rxo->rx_eq.q;
1583 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1584 sizeof(struct be_eq_entry));
1585 if (rc)
1586 goto err;
1587
1588 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1589 if (rc)
1590 goto err;
1591
1592 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1593
1594 /* CQ */
1595 cq = &rxo->cq;
1596 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1597 sizeof(struct be_eth_rx_compl));
1598 if (rc)
1599 goto err;
1600
1601 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1602 if (rc)
1603 goto err;
1604 /* Rx Q */
1605 q = &rxo->q;
1606 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1607 sizeof(struct be_eth_rx_d));
1608 if (rc)
1609 goto err;
1610
1611 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1612 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1613 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1614 if (rc)
1615 goto err;
1616 }
1617
1618 if (be_multi_rxq(adapter)) {
1619 u8 rsstable[MAX_RSS_QS];
1620
1621 for_all_rss_queues(adapter, rxo, i)
1622 rsstable[i] = rxo->rss_id;
1623
1624 rc = be_cmd_rss_config(adapter, rsstable,
1625 adapter->num_rx_qs - 1);
1626 if (rc)
1627 goto err;
1628 }
1629
1630 return 0;
1631 err:
1632 be_rx_queues_destroy(adapter);
1633 return -1;
1634 }
1635
1636 static bool event_peek(struct be_eq_obj *eq_obj)
1637 {
1638 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1639 if (!eqe->evt)
1640 return false;
1641 else
1642 return true;
1643 }
1644
1645 static irqreturn_t be_intx(int irq, void *dev)
1646 {
1647 struct be_adapter *adapter = dev;
1648 struct be_rx_obj *rxo;
1649 int isr, i, tx = 0 , rx = 0;
1650
1651 if (lancer_chip(adapter)) {
1652 if (event_peek(&adapter->tx_eq))
1653 tx = event_handle(adapter, &adapter->tx_eq);
1654 for_all_rx_queues(adapter, rxo, i) {
1655 if (event_peek(&rxo->rx_eq))
1656 rx |= event_handle(adapter, &rxo->rx_eq);
1657 }
1658
1659 if (!(tx || rx))
1660 return IRQ_NONE;
1661
1662 } else {
1663 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1664 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1665 if (!isr)
1666 return IRQ_NONE;
1667
1668 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1669 event_handle(adapter, &adapter->tx_eq);
1670
1671 for_all_rx_queues(adapter, rxo, i) {
1672 if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1673 event_handle(adapter, &rxo->rx_eq);
1674 }
1675 }
1676
1677 return IRQ_HANDLED;
1678 }
1679
1680 static irqreturn_t be_msix_rx(int irq, void *dev)
1681 {
1682 struct be_rx_obj *rxo = dev;
1683 struct be_adapter *adapter = rxo->adapter;
1684
1685 event_handle(adapter, &rxo->rx_eq);
1686
1687 return IRQ_HANDLED;
1688 }
1689
1690 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1691 {
1692 struct be_adapter *adapter = dev;
1693
1694 event_handle(adapter, &adapter->tx_eq);
1695
1696 return IRQ_HANDLED;
1697 }
1698
1699 static inline bool do_gro(struct be_rx_obj *rxo,
1700 struct be_eth_rx_compl *rxcp, u8 err)
1701 {
1702 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1703
1704 if (err)
1705 rxo->stats.rxcp_err++;
1706
1707 return (tcp_frame && !err) ? true : false;
1708 }
1709
1710 static int be_poll_rx(struct napi_struct *napi, int budget)
1711 {
1712 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1713 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1714 struct be_adapter *adapter = rxo->adapter;
1715 struct be_queue_info *rx_cq = &rxo->cq;
1716 struct be_eth_rx_compl *rxcp;
1717 u32 work_done;
1718 u16 frag_index, num_rcvd;
1719 u8 err;
1720
1721 rxo->stats.rx_polls++;
1722 for (work_done = 0; work_done < budget; work_done++) {
1723 rxcp = be_rx_compl_get(rxo);
1724 if (!rxcp)
1725 break;
1726
1727 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1728 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1729 rxcp);
1730 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1731 rxcp);
1732
1733 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1734 if (likely(frag_index != rxo->last_frag_index &&
1735 num_rcvd != 0)) {
1736 rxo->last_frag_index = frag_index;
1737
1738 if (do_gro(rxo, rxcp, err))
1739 be_rx_compl_process_gro(adapter, rxo, rxcp);
1740 else
1741 be_rx_compl_process(adapter, rxo, rxcp);
1742 }
1743
1744 be_rx_compl_reset(rxcp);
1745 }
1746
1747 /* Refill the queue */
1748 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1749 be_post_rx_frags(rxo);
1750
1751 /* All consumed */
1752 if (work_done < budget) {
1753 napi_complete(napi);
1754 be_cq_notify(adapter, rx_cq->id, true, work_done);
1755 } else {
1756 /* More to be consumed; continue with interrupts disabled */
1757 be_cq_notify(adapter, rx_cq->id, false, work_done);
1758 }
1759 return work_done;
1760 }
1761
1762 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1763 * For TX/MCC we don't honour budget; consume everything
1764 */
1765 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1766 {
1767 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1768 struct be_adapter *adapter =
1769 container_of(tx_eq, struct be_adapter, tx_eq);
1770 struct be_queue_info *txq = &adapter->tx_obj.q;
1771 struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
1772 struct be_eth_tx_compl *txcp;
1773 int tx_compl = 0, mcc_compl, status = 0;
1774 u16 end_idx;
1775
1776 while ((txcp = be_tx_compl_get(tx_cq))) {
1777 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1778 wrb_index, txcp);
1779 be_tx_compl_process(adapter, end_idx);
1780 tx_compl++;
1781 }
1782
1783 mcc_compl = be_process_mcc(adapter, &status);
1784
1785 napi_complete(napi);
1786
1787 if (mcc_compl) {
1788 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1789 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
1790 }
1791
1792 if (tx_compl) {
1793 be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
1794
1795 /* As Tx wrbs have been freed up, wake up netdev queue if
1796 * it was stopped due to lack of tx wrbs.
1797 */
1798 if (netif_queue_stopped(adapter->netdev) &&
1799 atomic_read(&txq->used) < txq->len / 2) {
1800 netif_wake_queue(adapter->netdev);
1801 }
1802
1803 tx_stats(adapter)->be_tx_events++;
1804 tx_stats(adapter)->be_tx_compl += tx_compl;
1805 }
1806
1807 return 1;
1808 }
1809
1810 void be_detect_dump_ue(struct be_adapter *adapter)
1811 {
1812 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1813 u32 i;
1814
1815 pci_read_config_dword(adapter->pdev,
1816 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1817 pci_read_config_dword(adapter->pdev,
1818 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1819 pci_read_config_dword(adapter->pdev,
1820 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1821 pci_read_config_dword(adapter->pdev,
1822 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1823
1824 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1825 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1826
1827 if (ue_status_lo || ue_status_hi) {
1828 adapter->ue_detected = true;
1829 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
1830 }
1831
1832 if (ue_status_lo) {
1833 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1834 if (ue_status_lo & 1)
1835 dev_err(&adapter->pdev->dev,
1836 "UE: %s bit set\n", ue_status_low_desc[i]);
1837 }
1838 }
1839 if (ue_status_hi) {
1840 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1841 if (ue_status_hi & 1)
1842 dev_err(&adapter->pdev->dev,
1843 "UE: %s bit set\n", ue_status_hi_desc[i]);
1844 }
1845 }
1846
1847 }
1848
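/* Periodic (once a second) housekeeping: reap MCC completions, refresh HW
 * stats, update TX/RX rates and the adaptive EQ delay, replenish starved RX
 * queues and check for unrecoverable errors.
 */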
1849 static void be_worker(struct work_struct *work)
1850 {
1851 struct be_adapter *adapter =
1852 container_of(work, struct be_adapter, work.work);
1853 struct be_rx_obj *rxo;
1854 int i;
1855
1856 /* when interrupts are not yet enabled, just reap any pending
1857 * mcc completions */
1858 if (!netif_running(adapter->netdev)) {
1859 int mcc_compl, status = 0;
1860
1861 mcc_compl = be_process_mcc(adapter, &status);
1862
1863 if (mcc_compl) {
1864 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1865 be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
1866 }
1867 goto reschedule;
1868 }
1869
1870 if (!adapter->stats_ioctl_sent)
1871 be_cmd_get_stats(adapter, &adapter->stats_cmd);
1872
1873 be_tx_rate_update(adapter);
1874
1875 for_all_rx_queues(adapter, rxo, i) {
1876 be_rx_rate_update(rxo);
1877 be_rx_eqd_update(adapter, rxo);
1878
1879 if (rxo->rx_post_starved) {
1880 rxo->rx_post_starved = false;
1881 be_post_rx_frags(rxo);
1882 }
1883 }
1884 if (!adapter->ue_detected && !lancer_chip(adapter))
1885 be_detect_dump_ue(adapter);
1886
1887 reschedule:
1888 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1889 }
1890
1891 static void be_msix_disable(struct be_adapter *adapter)
1892 {
1893 if (adapter->msix_enabled) {
1894 pci_disable_msix(adapter->pdev);
1895 adapter->msix_enabled = false;
1896 }
1897 }
1898
1899 static int be_num_rxqs_get(struct be_adapter *adapter)
1900 {
1901 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1902 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1903 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1904 } else {
1905 dev_warn(&adapter->pdev->dev,
1906 "No support for multiple RX queues\n");
1907 return 1;
1908 }
1909 }
1910
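/* Try to get one MSI-X vector per RX queue plus one for TX/MCC. If fewer
 * vectors are available, retry with what the kernel offers and shrink the
 * number of RX queues to match.
 */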
1911 static void be_msix_enable(struct be_adapter *adapter)
1912 {
1913 #define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
1914 int i, status;
1915
1916 adapter->num_rx_qs = be_num_rxqs_get(adapter);
1917
1918 for (i = 0; i < (adapter->num_rx_qs + 1); i++)
1919 adapter->msix_entries[i].entry = i;
1920
1921 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1922 adapter->num_rx_qs + 1);
1923 if (status == 0) {
1924 goto done;
1925 } else if (status >= BE_MIN_MSIX_VECTORS) {
1926 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
1927 status) == 0) {
1928 adapter->num_rx_qs = status - 1;
1929 dev_warn(&adapter->pdev->dev,
1930 "Could alloc only %d MSIx vectors. "
1931 "Using %d RX Qs\n", status, adapter->num_rx_qs);
1932 goto done;
1933 }
1934 }
1935 return;
1936 done:
1937 adapter->msix_enabled = true;
1938 }
1939
1940 static void be_sriov_enable(struct be_adapter *adapter)
1941 {
1942 be_check_sriov_fn_type(adapter);
1943 #ifdef CONFIG_PCI_IOV
1944 if (be_physfn(adapter) && num_vfs) {
1945 int status;
1946
1947 status = pci_enable_sriov(adapter->pdev, num_vfs);
1948 adapter->sriov_enabled = status ? false : true;
1949 }
1950 #endif
1951 }
1952
1953 static void be_sriov_disable(struct be_adapter *adapter)
1954 {
1955 #ifdef CONFIG_PCI_IOV
1956 if (adapter->sriov_enabled) {
1957 pci_disable_sriov(adapter->pdev);
1958 adapter->sriov_enabled = false;
1959 }
1960 #endif
1961 }
1962
1963 static inline int be_msix_vec_get(struct be_adapter *adapter,
1964 struct be_eq_obj *eq_obj)
1965 {
1966 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1967 }
1968
1969 static int be_request_irq(struct be_adapter *adapter,
1970 struct be_eq_obj *eq_obj,
1971 void *handler, char *desc, void *context)
1972 {
1973 struct net_device *netdev = adapter->netdev;
1974 int vec;
1975
1976 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1977 vec = be_msix_vec_get(adapter, eq_obj);
1978 return request_irq(vec, handler, 0, eq_obj->desc, context);
1979 }
1980
1981 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1982 void *context)
1983 {
1984 int vec = be_msix_vec_get(adapter, eq_obj);
1985 free_irq(vec, context);
1986 }
1987
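/* Request the MSI-X IRQs: one for the combined TX/MCC event queue and
 * one per RX event queue. On failure, free the IRQs acquired so far and
 * disable MSI-X so the caller can fall back to INTx.
 */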
1988 static int be_msix_register(struct be_adapter *adapter)
1989 {
1990 struct be_rx_obj *rxo;
1991 int status, i;
1992 char qname[10];
1993
1994 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
1995 adapter);
1996 if (status)
1997 goto err;
1998
1999 for_all_rx_queues(adapter, rxo, i) {
2000 sprintf(qname, "rxq%d", i);
2001 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2002 qname, rxo);
2003 if (status)
2004 goto err_msix;
2005 }
2006
2007 return 0;
2008
2009 err_msix:
2010 be_free_irq(adapter, &adapter->tx_eq, adapter);
2011
2012 for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2013 be_free_irq(adapter, &rxo->rx_eq, rxo);
2014
2015 err:
2016 dev_warn(&adapter->pdev->dev,
2017 "MSIX Request IRQ failed - err %d\n", status);
2018 pci_disable_msix(adapter->pdev);
2019 adapter->msix_enabled = false;
2020 return status;
2021 }
2022
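/* Register interrupts, preferring MSI-X when it was enabled. Only the PF
 * may fall back to legacy INTx; for a VF an MSI-X failure is fatal.
 */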
2023 static int be_irq_register(struct be_adapter *adapter)
2024 {
2025 struct net_device *netdev = adapter->netdev;
2026 int status;
2027
2028 if (adapter->msix_enabled) {
2029 status = be_msix_register(adapter);
2030 if (status == 0)
2031 goto done;
2032 /* INTx is not supported for VF */
2033 if (!be_physfn(adapter))
2034 return status;
2035 }
2036
2037 /* INTx */
2038 netdev->irq = adapter->pdev->irq;
2039 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2040 adapter);
2041 if (status) {
2042 dev_err(&adapter->pdev->dev,
2043 "INTx request IRQ failed - err %d\n", status);
2044 return status;
2045 }
2046 done:
2047 adapter->isr_registered = true;
2048 return 0;
2049 }
2050
2051 static void be_irq_unregister(struct be_adapter *adapter)
2052 {
2053 struct net_device *netdev = adapter->netdev;
2054 struct be_rx_obj *rxo;
2055 int i;
2056
2057 if (!adapter->isr_registered)
2058 return;
2059
2060 /* INTx */
2061 if (!adapter->msix_enabled) {
2062 free_irq(netdev->irq, adapter);
2063 goto done;
2064 }
2065
2066 /* MSIx */
2067 be_free_irq(adapter, &adapter->tx_eq, adapter);
2068
2069 for_all_rx_queues(adapter, rxo, i)
2070 be_free_irq(adapter, &rxo->rx_eq, rxo);
2071
2072 done:
2073 adapter->isr_registered = false;
2074 }
2075
2076 static int be_close(struct net_device *netdev)
2077 {
2078 struct be_adapter *adapter = netdev_priv(netdev);
2079 struct be_rx_obj *rxo;
2080 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2081 int vec, i;
2082
2083 be_async_mcc_disable(adapter);
2084
2085 netif_stop_queue(netdev);
2086 netif_carrier_off(netdev);
2087 adapter->link_up = false;
2088
2089 if (!lancer_chip(adapter))
2090 be_intr_set(adapter, false);
2091
2092 if (adapter->msix_enabled) {
2093 vec = be_msix_vec_get(adapter, tx_eq);
2094 synchronize_irq(vec);
2095
2096 for_all_rx_queues(adapter, rxo, i) {
2097 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2098 synchronize_irq(vec);
2099 }
2100 } else {
2101 synchronize_irq(netdev->irq);
2102 }
2103 be_irq_unregister(adapter);
2104
2105 for_all_rx_queues(adapter, rxo, i)
2106 napi_disable(&rxo->rx_eq.napi);
2107
2108 napi_disable(&tx_eq->napi);
2109
2110 /* Wait for all pending tx completions to arrive so that
2111 * all tx skbs are freed.
2112 */
2113 be_tx_compl_clean(adapter);
2114
2115 return 0;
2116 }
2117
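/* ndo_open: post RX buffers, enable NAPI, register interrupts, arm the
 * event and completion queues, start async MCC processing and query the
 * link state. PF-only steps (VLAN config, flow control) run last; any
 * failure unwinds through be_close().
 */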
2118 static int be_open(struct net_device *netdev)
2119 {
2120 struct be_adapter *adapter = netdev_priv(netdev);
2121 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2122 struct be_rx_obj *rxo;
2123 bool link_up;
2124 int status, i;
2125 u8 mac_speed;
2126 u16 link_speed;
2127
2128 for_all_rx_queues(adapter, rxo, i) {
2129 be_post_rx_frags(rxo);
2130 napi_enable(&rxo->rx_eq.napi);
2131 }
2132 napi_enable(&tx_eq->napi);
2133
2134 be_irq_register(adapter);
2135
2136 if (!lancer_chip(adapter))
2137 be_intr_set(adapter, true);
2138
2139 /* The evt queues are created in unarmed state; arm them */
2140 for_all_rx_queues(adapter, rxo, i) {
2141 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2142 be_cq_notify(adapter, rxo->cq.id, true, 0);
2143 }
2144 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2145
2146 /* Now that interrupts are on we can process async mcc */
2147 be_async_mcc_enable(adapter);
2148
2149 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
2150 &link_speed);
2151 if (status)
2152 goto err;
2153 be_link_status_update(adapter, link_up);
2154
2155 if (be_physfn(adapter)) {
2156 status = be_vid_config(adapter, false, 0);
2157 if (status)
2158 goto err;
2159
2160 status = be_cmd_set_flow_control(adapter,
2161 adapter->tx_fc, adapter->rx_fc);
2162 if (status)
2163 goto err;
2164 }
2165
2166 return 0;
2167 err:
2168 be_close(adapter->netdev);
2169 return -EIO;
2170 }
2171
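/* Program magic-packet Wake-on-LAN in firmware. Enabling sets the PM
 * control bits in PCI config space, arms WoL with the netdev MAC and
 * enables PCI wake from D3hot/D3cold; disabling passes an all-zero MAC
 * and clears the wake flags.
 */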
2172 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2173 {
2174 struct be_dma_mem cmd;
2175 int status = 0;
2176 u8 mac[ETH_ALEN];
2177
2178 memset(mac, 0, ETH_ALEN);
2179
2180 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2181 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2182 GFP_KERNEL);
2183 if (cmd.va == NULL)
2184 return -ENOMEM;
2185 memset(cmd.va, 0, cmd.size);
2186
2187 if (enable) {
2188 status = pci_write_config_dword(adapter->pdev,
2189 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2190 if (status) {
2191 dev_err(&adapter->pdev->dev,
2192 "Could not enable Wake-on-lan\n");
2193 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2194 cmd.dma);
2195 return status;
2196 }
2197 status = be_cmd_enable_magic_wol(adapter,
2198 adapter->netdev->dev_addr, &cmd);
2199 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2200 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2201 } else {
2202 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2203 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2204 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2205 }
2206
2207 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2208 return status;
2209 }
2210
2211 /*
2212 * Generate a seed MAC address from the PF MAC Address using jhash.
2213 * MAC addresses for VFs are assigned incrementally starting from the seed.
2214 * These addresses are programmed in the ASIC by the PF and the VF driver
2215 * queries for the MAC address during its probe.
2216 */
2217 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2218 {
2219 u32 vf = 0;
2220 int status = 0;
2221 u8 mac[ETH_ALEN];
2222
2223 be_vf_eth_addr_generate(adapter, mac);
2224
2225 for (vf = 0; vf < num_vfs; vf++) {
2226 status = be_cmd_pmac_add(adapter, mac,
2227 adapter->vf_cfg[vf].vf_if_handle,
2228 &adapter->vf_cfg[vf].vf_pmac_id);
2229 if (status)
2230 dev_err(&adapter->pdev->dev,
2231 "Mac address add failed for VF %d\n", vf);
2232 else
2233 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2234
2235 mac[5] += 1;
2236 }
2237 return status;
2238 }
2239
2240 static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2241 {
2242 u32 vf;
2243
2244 for (vf = 0; vf < num_vfs; vf++) {
2245 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2246 be_cmd_pmac_del(adapter,
2247 adapter->vf_cfg[vf].vf_if_handle,
2248 adapter->vf_cfg[vf].vf_pmac_id);
2249 }
2250 }
2251
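/* One-time data path setup: create the NIC interface (with RSS and
 * promiscuous capabilities on the PF), create one interface per VF when
 * SR-IOV is active, then bring up the TX, RX and MCC queues and program
 * the VF MAC addresses. Errors unwind in reverse order.
 */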
2252 static int be_setup(struct be_adapter *adapter)
2253 {
2254 struct net_device *netdev = adapter->netdev;
2255 u32 cap_flags, en_flags, vf = 0;
2256 int status;
2257 u8 mac[ETH_ALEN];
2258
2259 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2260
2261 if (be_physfn(adapter)) {
2262 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2263 BE_IF_FLAGS_PROMISCUOUS |
2264 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2265 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2266
2267 if (be_multi_rxq(adapter)) {
2268 cap_flags |= BE_IF_FLAGS_RSS;
2269 en_flags |= BE_IF_FLAGS_RSS;
2270 }
2271 }
2272
2273 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2274 netdev->dev_addr, false/* pmac_invalid */,
2275 &adapter->if_handle, &adapter->pmac_id, 0);
2276 if (status != 0)
2277 goto do_none;
2278
2279 if (be_physfn(adapter)) {
2280 while (vf < num_vfs) {
2281 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2282 | BE_IF_FLAGS_BROADCAST;
2283 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2284 mac, true,
2285 &adapter->vf_cfg[vf].vf_if_handle,
2286 NULL, vf+1);
2287 if (status) {
2288 dev_err(&adapter->pdev->dev,
2289 "Interface Create failed for VF %d\n", vf);
2290 goto if_destroy;
2291 }
2292 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2293 vf++;
2294 }
2295 } else {
2296 status = be_cmd_mac_addr_query(adapter, mac,
2297 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2298 if (!status) {
2299 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2300 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2301 }
2302 }
2303
2304 status = be_tx_queues_create(adapter);
2305 if (status != 0)
2306 goto if_destroy;
2307
2308 status = be_rx_queues_create(adapter);
2309 if (status != 0)
2310 goto tx_qs_destroy;
2311
2312 status = be_mcc_queues_create(adapter);
2313 if (status != 0)
2314 goto rx_qs_destroy;
2315
2316 if (be_physfn(adapter)) {
2317 status = be_vf_eth_addr_config(adapter);
2318 if (status)
2319 goto mcc_q_destroy;
2320 }
2321
2322 adapter->link_speed = -1;
2323
2324 return 0;
2325
2326 mcc_q_destroy:
2327 if (be_physfn(adapter))
2328 be_vf_eth_addr_rem(adapter);
2329 be_mcc_queues_destroy(adapter);
2330 rx_qs_destroy:
2331 be_rx_queues_destroy(adapter);
2332 tx_qs_destroy:
2333 be_tx_queues_destroy(adapter);
2334 if_destroy:
2335 for (vf = 0; vf < num_vfs; vf++)
2336 if (adapter->vf_cfg[vf].vf_if_handle)
2337 be_cmd_if_destroy(adapter,
2338 adapter->vf_cfg[vf].vf_if_handle);
2339 be_cmd_if_destroy(adapter, adapter->if_handle);
2340 do_none:
2341 return status;
2342 }
2343
2344 static int be_clear(struct be_adapter *adapter)
2345 {
2346 if (be_physfn(adapter))
2347 be_vf_eth_addr_rem(adapter);
2348
2349 be_mcc_queues_destroy(adapter);
2350 be_rx_queues_destroy(adapter);
2351 be_tx_queues_destroy(adapter);
2352
2353 be_cmd_if_destroy(adapter, adapter->if_handle);
2354
2355 /* tell fw we're done with firing cmds */
2356 be_cmd_fw_clean(adapter);
2357 return 0;
2358 }
2359
2360
2361 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2362 static bool be_flash_redboot(struct be_adapter *adapter,
2363 const u8 *p, u32 img_start, int image_size,
2364 int hdr_size)
2365 {
2366 u32 crc_offset;
2367 u8 flashed_crc[4];
2368 int status;
2369
2370 crc_offset = hdr_size + img_start + image_size - 4;
2371
2372 p += crc_offset;
2373
2374 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2375 (image_size - 4));
2376 if (status) {
2377 dev_err(&adapter->pdev->dev,
2378 "could not get crc from flash, not flashing redboot\n");
2379 return false;
2380 }
2381
2382 /* update redboot only if crc does not match */
2383 if (!memcmp(flashed_crc, p, 4))
2384 return false;
2385 else
2386 return true;
2387 }
2388
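/* Flash each firmware component listed in the per-generation table,
 * writing the image in 32KB chunks: intermediate chunks use
 * FLASHROM_OPER_SAVE and the final chunk uses FLASHROM_OPER_FLASH to
 * commit. The NCSI image is skipped on older firmware, and RedBoot is
 * only rewritten when its CRC differs from what is already in flash.
 */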
2389 static int be_flash_data(struct be_adapter *adapter,
2390 const struct firmware *fw,
2391 struct be_dma_mem *flash_cmd, int num_of_images)
2392
2393 {
2394 int status = 0, i, filehdr_size = 0;
2395 u32 total_bytes = 0, flash_op;
2396 int num_bytes;
2397 const u8 *p = fw->data;
2398 struct be_cmd_write_flashrom *req = flash_cmd->va;
2399 const struct flash_comp *pflashcomp;
2400 int num_comp;
2401
2402 static const struct flash_comp gen3_flash_types[9] = {
2403 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2404 FLASH_IMAGE_MAX_SIZE_g3},
2405 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2406 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2407 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2408 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2409 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2410 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2411 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2412 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2413 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2414 FLASH_IMAGE_MAX_SIZE_g3},
2415 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2416 FLASH_IMAGE_MAX_SIZE_g3},
2417 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2418 FLASH_IMAGE_MAX_SIZE_g3},
2419 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2420 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2421 };
2422 static const struct flash_comp gen2_flash_types[8] = {
2423 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2424 FLASH_IMAGE_MAX_SIZE_g2},
2425 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2426 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2427 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2428 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2429 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2430 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2431 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2432 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2433 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2434 FLASH_IMAGE_MAX_SIZE_g2},
2435 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2436 FLASH_IMAGE_MAX_SIZE_g2},
2437 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2438 FLASH_IMAGE_MAX_SIZE_g2}
2439 };
2440
2441 if (adapter->generation == BE_GEN3) {
2442 pflashcomp = gen3_flash_types;
2443 filehdr_size = sizeof(struct flash_file_hdr_g3);
2444 num_comp = ARRAY_SIZE(gen3_flash_types);
2445 } else {
2446 pflashcomp = gen2_flash_types;
2447 filehdr_size = sizeof(struct flash_file_hdr_g2);
2448 num_comp = ARRAY_SIZE(gen2_flash_types);
2449 }
2450 for (i = 0; i < num_comp; i++) {
2451 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2452 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2453 continue;
2454 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2455 (!be_flash_redboot(adapter, fw->data,
2456 pflashcomp[i].offset, pflashcomp[i].size,
2457 filehdr_size)))
2458 continue;
2459 p = fw->data;
2460 p += filehdr_size + pflashcomp[i].offset
2461 + (num_of_images * sizeof(struct image_hdr));
2462 if (p + pflashcomp[i].size > fw->data + fw->size)
2463 return -1;
2464 total_bytes = pflashcomp[i].size;
2465 while (total_bytes) {
2466 if (total_bytes > 32*1024)
2467 num_bytes = 32*1024;
2468 else
2469 num_bytes = total_bytes;
2470 total_bytes -= num_bytes;
2471
2472 if (!total_bytes)
2473 flash_op = FLASHROM_OPER_FLASH;
2474 else
2475 flash_op = FLASHROM_OPER_SAVE;
2476 memcpy(req->params.data_buf, p, num_bytes);
2477 p += num_bytes;
2478 status = be_cmd_write_flashrom(adapter, flash_cmd,
2479 pflashcomp[i].optype, flash_op, num_bytes);
2480 if (status) {
2481 dev_err(&adapter->pdev->dev,
2482 "cmd to write to flash rom failed.\n");
2483 return -1;
2484 }
2485 yield();
2486 }
2487 }
2488 return 0;
2489 }
2490
2491 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2492 {
2493 if (fhdr == NULL)
2494 return 0;
2495 if (fhdr->build[0] == '3')
2496 return BE_GEN3;
2497 else if (fhdr->build[0] == '2')
2498 return BE_GEN2;
2499 else
2500 return 0;
2501 }
2502
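/* ethtool firmware-flash entry point: load the UFI file via
 * request_firmware(), check that its generation matches the adapter
 * (GEN2 vs GEN3) and pass the contained image(s) to be_flash_data().
 */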
2503 int be_load_fw(struct be_adapter *adapter, u8 *func)
2504 {
2505 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2506 const struct firmware *fw;
2507 struct flash_file_hdr_g2 *fhdr;
2508 struct flash_file_hdr_g3 *fhdr3;
2509 struct image_hdr *img_hdr_ptr = NULL;
2510 struct be_dma_mem flash_cmd;
2511 int status, i = 0, num_imgs = 0;
2512 const u8 *p;
2513
2514 if (!netif_running(adapter->netdev)) {
2515 dev_err(&adapter->pdev->dev,
2516 "Firmware load not allowed (interface is down)\n");
2517 return -EPERM;
2518 }
2519
2520 strlcpy(fw_file, func, sizeof(fw_file));
2521
2522 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2523 if (status)
2524 goto fw_exit;
2525
2526 p = fw->data;
2527 fhdr = (struct flash_file_hdr_g2 *) p;
2528 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2529
2530 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2531 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2532 &flash_cmd.dma, GFP_KERNEL);
2533 if (!flash_cmd.va) {
2534 status = -ENOMEM;
2535 dev_err(&adapter->pdev->dev,
2536 "Memory allocation failure while flashing\n");
2537 goto fw_exit;
2538 }
2539
2540 if ((adapter->generation == BE_GEN3) &&
2541 (get_ufigen_type(fhdr) == BE_GEN3)) {
2542 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2543 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2544 for (i = 0; i < num_imgs; i++) {
2545 img_hdr_ptr = (struct image_hdr *) (fw->data +
2546 (sizeof(struct flash_file_hdr_g3) +
2547 i * sizeof(struct image_hdr)));
2548 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2549 status = be_flash_data(adapter, fw, &flash_cmd,
2550 num_imgs);
2551 }
2552 } else if ((adapter->generation == BE_GEN2) &&
2553 (get_ufigen_type(fhdr) == BE_GEN2)) {
2554 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2555 } else {
2556 dev_err(&adapter->pdev->dev,
2557 "UFI and Interface are not compatible for flashing\n");
2558 status = -1;
2559 }
2560
2561 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2562 flash_cmd.dma);
2563 if (status) {
2564 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2565 goto fw_exit;
2566 }
2567
2568 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2569
2570 fw_exit:
2571 release_firmware(fw);
2572 return status;
2573 }
2574
2575 static struct net_device_ops be_netdev_ops = {
2576 .ndo_open = be_open,
2577 .ndo_stop = be_close,
2578 .ndo_start_xmit = be_xmit,
2579 .ndo_set_rx_mode = be_set_multicast_list,
2580 .ndo_set_mac_address = be_mac_addr_set,
2581 .ndo_change_mtu = be_change_mtu,
2582 .ndo_validate_addr = eth_validate_addr,
2583 .ndo_vlan_rx_register = be_vlan_register,
2584 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2585 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2586 .ndo_set_vf_mac = be_set_vf_mac,
2587 .ndo_set_vf_vlan = be_set_vf_vlan,
2588 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2589 .ndo_get_vf_config = be_get_vf_config
2590 };
2591
2592 static void be_netdev_init(struct net_device *netdev)
2593 {
2594 struct be_adapter *adapter = netdev_priv(netdev);
2595 struct be_rx_obj *rxo;
2596 int i;
2597
2598 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2599 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2600 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2601 NETIF_F_GRO | NETIF_F_TSO6;
2602
2603 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2604 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2605
2606 if (lancer_chip(adapter))
2607 netdev->vlan_features |= NETIF_F_TSO6;
2608
2609 netdev->flags |= IFF_MULTICAST;
2610
2611 adapter->rx_csum = true;
2612
2613 /* Default settings for Rx and Tx flow control */
2614 adapter->rx_fc = true;
2615 adapter->tx_fc = true;
2616
2617 netif_set_gso_max_size(netdev, 65535);
2618
2619 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2620
2621 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2622
2623 for_all_rx_queues(adapter, rxo, i)
2624 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
2625 BE_NAPI_WEIGHT);
2626
2627 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
2628 BE_NAPI_WEIGHT);
2629 }
2630
2631 static void be_unmap_pci_bars(struct be_adapter *adapter)
2632 {
2633 if (adapter->csr)
2634 iounmap(adapter->csr);
2635 if (adapter->db)
2636 iounmap(adapter->db);
2637 if (adapter->pcicfg && be_physfn(adapter))
2638 iounmap(adapter->pcicfg);
2639 }
2640
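/* Map the PCI BARs. Lancer exposes only a doorbell BAR (BAR 0). On
 * BE2/BE3 the PF maps the CSR BAR, and the doorbell and PCI-config
 * window BARs are selected per generation (and, on GEN3, per PF/VF);
 * a VF derives its pcicfg pointer from an offset within the doorbell BAR.
 */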
2641 static int be_map_pci_bars(struct be_adapter *adapter)
2642 {
2643 u8 __iomem *addr;
2644 int pcicfg_reg, db_reg;
2645
2646 if (lancer_chip(adapter)) {
2647 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2648 pci_resource_len(adapter->pdev, 0));
2649 if (addr == NULL)
2650 return -ENOMEM;
2651 adapter->db = addr;
2652 return 0;
2653 }
2654
2655 if (be_physfn(adapter)) {
2656 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2657 pci_resource_len(adapter->pdev, 2));
2658 if (addr == NULL)
2659 return -ENOMEM;
2660 adapter->csr = addr;
2661 }
2662
2663 if (adapter->generation == BE_GEN2) {
2664 pcicfg_reg = 1;
2665 db_reg = 4;
2666 } else {
2667 pcicfg_reg = 0;
2668 if (be_physfn(adapter))
2669 db_reg = 4;
2670 else
2671 db_reg = 0;
2672 }
2673 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2674 pci_resource_len(adapter->pdev, db_reg));
2675 if (addr == NULL)
2676 goto pci_map_err;
2677 adapter->db = addr;
2678
2679 if (be_physfn(adapter)) {
2680 addr = ioremap_nocache(
2681 pci_resource_start(adapter->pdev, pcicfg_reg),
2682 pci_resource_len(adapter->pdev, pcicfg_reg));
2683 if (addr == NULL)
2684 goto pci_map_err;
2685 adapter->pcicfg = addr;
2686 } else
2687 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2688
2689 return 0;
2690 pci_map_err:
2691 be_unmap_pci_bars(adapter);
2692 return -ENOMEM;
2693 }
2694
2695
2696 static void be_ctrl_cleanup(struct be_adapter *adapter)
2697 {
2698 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
2699
2700 be_unmap_pci_bars(adapter);
2701
2702 if (mem->va)
2703 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2704 mem->dma);
2705
2706 mem = &adapter->mc_cmd_mem;
2707 if (mem->va)
2708 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
2709 mem->dma);
2710 }
2711
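/* Map the BARs and set up control-path resources: a DMA buffer for the
 * mailbox (aligned to 16 bytes inside an over-allocated region), a DMA
 * buffer for the multicast MAC config command, and the mailbox/MCC locks
 * and flash completion.
 */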
2712 static int be_ctrl_init(struct be_adapter *adapter)
2713 {
2714 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2715 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2716 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
2717 int status;
2718
2719 status = be_map_pci_bars(adapter);
2720 if (status)
2721 goto done;
2722
2723 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2724 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2725 mbox_mem_alloc->size,
2726 &mbox_mem_alloc->dma,
2727 GFP_KERNEL);
2728 if (!mbox_mem_alloc->va) {
2729 status = -ENOMEM;
2730 goto unmap_pci_bars;
2731 }
2732
2733 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2734 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2735 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2736 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2737
2738 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2739 mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
2740 mc_cmd_mem->size, &mc_cmd_mem->dma,
2741 GFP_KERNEL);
2742 if (mc_cmd_mem->va == NULL) {
2743 status = -ENOMEM;
2744 goto free_mbox;
2745 }
2746 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2747
2748 mutex_init(&adapter->mbox_lock);
2749 spin_lock_init(&adapter->mcc_lock);
2750 spin_lock_init(&adapter->mcc_cq_lock);
2751
2752 init_completion(&adapter->flash_compl);
2753 pci_save_state(adapter->pdev);
2754 return 0;
2755
2756 free_mbox:
2757 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
2758 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2759
2760 unmap_pci_bars:
2761 be_unmap_pci_bars(adapter);
2762
2763 done:
2764 return status;
2765 }
2766
2767 static void be_stats_cleanup(struct be_adapter *adapter)
2768 {
2769 struct be_dma_mem *cmd = &adapter->stats_cmd;
2770
2771 if (cmd->va)
2772 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2773 cmd->va, cmd->dma);
2774 }
2775
2776 static int be_stats_init(struct be_adapter *adapter)
2777 {
2778 struct be_dma_mem *cmd = &adapter->stats_cmd;
2779
2780 cmd->size = sizeof(struct be_cmd_req_get_stats);
2781 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
2782 GFP_KERNEL);
2783 if (cmd->va == NULL)
2784 return -ENOMEM;
2785 memset(cmd->va, 0, cmd->size);
2786 return 0;
2787 }
2788
2789 static void __devexit be_remove(struct pci_dev *pdev)
2790 {
2791 struct be_adapter *adapter = pci_get_drvdata(pdev);
2792
2793 if (!adapter)
2794 return;
2795
2796 cancel_delayed_work_sync(&adapter->work);
2797
2798 unregister_netdev(adapter->netdev);
2799
2800 be_clear(adapter);
2801
2802 be_stats_cleanup(adapter);
2803
2804 be_ctrl_cleanup(adapter);
2805
2806 be_sriov_disable(adapter);
2807
2808 be_msix_disable(adapter);
2809
2810 pci_set_drvdata(pdev, NULL);
2811 pci_release_regions(pdev);
2812 pci_disable_device(pdev);
2813
2814 free_netdev(adapter->netdev);
2815 }
2816
2817 static int be_get_config(struct be_adapter *adapter)
2818 {
2819 int status;
2820 u8 mac[ETH_ALEN];
2821
2822 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2823 if (status)
2824 return status;
2825
2826 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2827 &adapter->function_mode, &adapter->function_caps);
2828 if (status)
2829 return status;
2830
2831 memset(mac, 0, ETH_ALEN);
2832
2833 if (be_physfn(adapter)) {
2834 status = be_cmd_mac_addr_query(adapter, mac,
2835 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2836
2837 if (status)
2838 return status;
2839
2840 if (!is_valid_ether_addr(mac))
2841 return -EADDRNOTAVAIL;
2842
2843 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2844 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2845 }
2846
2847 if (adapter->function_mode & 0x400) /* FLEX10 / multi-channel mode */
2848 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2849 else
2850 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2851
2852 return 0;
2853 }
2854
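/* Determine the adapter generation from the PCI device ID. For
 * OC_DEVICE_ID3 (the Lancer family) also validate the SLI interface
 * register, record the SLI family and reject num_vfs since VFs are not
 * supported on that device.
 */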
2855 static int be_dev_family_check(struct be_adapter *adapter)
2856 {
2857 struct pci_dev *pdev = adapter->pdev;
2858 u32 sli_intf = 0, if_type;
2859
2860 switch (pdev->device) {
2861 case BE_DEVICE_ID1:
2862 case OC_DEVICE_ID1:
2863 adapter->generation = BE_GEN2;
2864 break;
2865 case BE_DEVICE_ID2:
2866 case OC_DEVICE_ID2:
2867 adapter->generation = BE_GEN3;
2868 break;
2869 case OC_DEVICE_ID3:
2870 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872 SLI_INTF_IF_TYPE_SHIFT;
2873
2874 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875 if_type != 0x02) {
2876 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877 return -EINVAL;
2878 }
2879 if (num_vfs > 0) {
2880 dev_err(&pdev->dev, "VFs not supported\n");
2881 return -EINVAL;
2882 }
2883 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884 SLI_INTF_FAMILY_SHIFT);
2885 adapter->generation = BE_GEN3;
2886 break;
2887 default:
2888 adapter->generation = 0;
2889 }
2890 return 0;
2891 }
2892
2893 static int __devinit be_probe(struct pci_dev *pdev,
2894 const struct pci_device_id *pdev_id)
2895 {
2896 int status = 0;
2897 struct be_adapter *adapter;
2898 struct net_device *netdev;
2899
2900 status = pci_enable_device(pdev);
2901 if (status)
2902 goto do_none;
2903
2904 status = pci_request_regions(pdev, DRV_NAME);
2905 if (status)
2906 goto disable_dev;
2907 pci_set_master(pdev);
2908
2909 netdev = alloc_etherdev(sizeof(struct be_adapter));
2910 if (netdev == NULL) {
2911 status = -ENOMEM;
2912 goto rel_reg;
2913 }
2914 adapter = netdev_priv(netdev);
2915 adapter->pdev = pdev;
2916 pci_set_drvdata(pdev, adapter);
2917
2918 status = be_dev_family_check(adapter);
2919 if (status)
2920 goto free_netdev;
2921
2922 adapter->netdev = netdev;
2923 SET_NETDEV_DEV(netdev, &pdev->dev);
2924
2925 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2926 if (!status) {
2927 netdev->features |= NETIF_F_HIGHDMA;
2928 } else {
2929 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2930 if (status) {
2931 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2932 goto free_netdev;
2933 }
2934 }
2935
2936 be_sriov_enable(adapter);
2937
2938 status = be_ctrl_init(adapter);
2939 if (status)
2940 goto free_netdev;
2941
2942 /* sync up with fw's ready state */
2943 if (be_physfn(adapter)) {
2944 status = be_cmd_POST(adapter);
2945 if (status)
2946 goto ctrl_clean;
2947 }
2948
2949 /* tell fw we're ready to fire cmds */
2950 status = be_cmd_fw_init(adapter);
2951 if (status)
2952 goto ctrl_clean;
2953
2954 if (be_physfn(adapter)) {
2955 status = be_cmd_reset_function(adapter);
2956 if (status)
2957 goto ctrl_clean;
2958 }
2959
2960 status = be_stats_init(adapter);
2961 if (status)
2962 goto ctrl_clean;
2963
2964 status = be_get_config(adapter);
2965 if (status)
2966 goto stats_clean;
2967
2968 be_msix_enable(adapter);
2969
2970 INIT_DELAYED_WORK(&adapter->work, be_worker);
2971
2972 status = be_setup(adapter);
2973 if (status)
2974 goto msix_disable;
2975
2976 be_netdev_init(netdev);
2977 status = register_netdev(netdev);
2978 if (status != 0)
2979 goto unsetup;
2980 netif_carrier_off(netdev);
2981
2982 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
2983 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
2984 return 0;
2985
2986 unsetup:
2987 be_clear(adapter);
2988 msix_disable:
2989 be_msix_disable(adapter);
2990 stats_clean:
2991 be_stats_cleanup(adapter);
2992 ctrl_clean:
2993 be_ctrl_cleanup(adapter);
2994 free_netdev:
2995 be_sriov_disable(adapter);
2996 free_netdev(netdev);
2997 pci_set_drvdata(pdev, NULL);
2998 rel_reg:
2999 pci_release_regions(pdev);
3000 disable_dev:
3001 pci_disable_device(pdev);
3002 do_none:
3003 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3004 return status;
3005 }
3006
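/* Legacy PM suspend: arm WoL if configured, detach and close the netdev,
 * save the current flow-control settings, tear down the data path and
 * move the device to the requested power state.
 */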
3007 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3008 {
3009 struct be_adapter *adapter = pci_get_drvdata(pdev);
3010 struct net_device *netdev = adapter->netdev;
3011
3012 if (adapter->wol)
3013 be_setup_wol(adapter, true);
3014
3015 netif_device_detach(netdev);
3016 if (netif_running(netdev)) {
3017 rtnl_lock();
3018 be_close(netdev);
3019 rtnl_unlock();
3020 }
3021 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
3022 be_clear(adapter);
3023
3024 pci_save_state(pdev);
3025 pci_disable_device(pdev);
3026 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3027 return 0;
3028 }
3029
3030 static int be_resume(struct pci_dev *pdev)
3031 {
3032 int status = 0;
3033 struct be_adapter *adapter = pci_get_drvdata(pdev);
3034 struct net_device *netdev = adapter->netdev;
3035
3036 netif_device_detach(netdev);
3037
3038 status = pci_enable_device(pdev);
3039 if (status)
3040 return status;
3041
3042 pci_set_power_state(pdev, PCI_D0);
3043 pci_restore_state(pdev);
3044
3045 /* tell fw we're ready to fire cmds */
3046 status = be_cmd_fw_init(adapter);
3047 if (status)
3048 return status;
3049
3050 be_setup(adapter);
3051 if (netif_running(netdev)) {
3052 rtnl_lock();
3053 be_open(netdev);
3054 rtnl_unlock();
3055 }
3056 netif_device_attach(netdev);
3057
3058 if (adapter->wol)
3059 be_setup_wol(adapter, false);
3060 return 0;
3061 }
3062
3063 /*
3064 * An FLR will stop BE from DMAing any data.
3065 */
3066 static void be_shutdown(struct pci_dev *pdev)
3067 {
3068 struct be_adapter *adapter = pci_get_drvdata(pdev);
3069 struct net_device *netdev = adapter->netdev;
3070
3071 netif_device_detach(netdev);
3072
3073 be_cmd_reset_function(adapter);
3074
3075 if (adapter->wol)
3076 be_setup_wol(adapter, true);
3077
3078 pci_disable_device(pdev);
3079 }
3080
3081 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3082 pci_channel_state_t state)
3083 {
3084 struct be_adapter *adapter = pci_get_drvdata(pdev);
3085 struct net_device *netdev = adapter->netdev;
3086
3087 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3088
3089 adapter->eeh_err = true;
3090
3091 netif_device_detach(netdev);
3092
3093 if (netif_running(netdev)) {
3094 rtnl_lock();
3095 be_close(netdev);
3096 rtnl_unlock();
3097 }
3098 be_clear(adapter);
3099
3100 if (state == pci_channel_io_perm_failure)
3101 return PCI_ERS_RESULT_DISCONNECT;
3102
3103 pci_disable_device(pdev);
3104
3105 return PCI_ERS_RESULT_NEED_RESET;
3106 }
3107
3108 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3109 {
3110 struct be_adapter *adapter = pci_get_drvdata(pdev);
3111 int status;
3112
3113 dev_info(&adapter->pdev->dev, "EEH reset\n");
3114 adapter->eeh_err = false;
3115
3116 status = pci_enable_device(pdev);
3117 if (status)
3118 return PCI_ERS_RESULT_DISCONNECT;
3119
3120 pci_set_master(pdev);
3121 pci_set_power_state(pdev, PCI_D0);
3122 pci_restore_state(pdev);
3123
3124 /* Check if card is ok and fw is ready */
3125 status = be_cmd_POST(adapter);
3126 if (status)
3127 return PCI_ERS_RESULT_DISCONNECT;
3128
3129 return PCI_ERS_RESULT_RECOVERED;
3130 }
3131
3132 static void be_eeh_resume(struct pci_dev *pdev)
3133 {
3134 int status = 0;
3135 struct be_adapter *adapter = pci_get_drvdata(pdev);
3136 struct net_device *netdev = adapter->netdev;
3137
3138 dev_info(&adapter->pdev->dev, "EEH resume\n");
3139
3140 pci_save_state(pdev);
3141
3142 /* tell fw we're ready to fire cmds */
3143 status = be_cmd_fw_init(adapter);
3144 if (status)
3145 goto err;
3146
3147 status = be_setup(adapter);
3148 if (status)
3149 goto err;
3150
3151 if (netif_running(netdev)) {
3152 status = be_open(netdev);
3153 if (status)
3154 goto err;
3155 }
3156 netif_device_attach(netdev);
3157 return;
3158 err:
3159 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3160 }
3161
3162 static struct pci_error_handlers be_eeh_handlers = {
3163 .error_detected = be_eeh_err_detected,
3164 .slot_reset = be_eeh_reset,
3165 .resume = be_eeh_resume,
3166 };
3167
3168 static struct pci_driver be_driver = {
3169 .name = DRV_NAME,
3170 .id_table = be_dev_ids,
3171 .probe = be_probe,
3172 .remove = be_remove,
3173 .suspend = be_suspend,
3174 .resume = be_resume,
3175 .shutdown = be_shutdown,
3176 .err_handler = &be_eeh_handlers
3177 };
3178
3179 static int __init be_init_module(void)
3180 {
3181 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3182 rx_frag_size != 2048) {
3183 printk(KERN_WARNING DRV_NAME
3184 " : Module param rx_frag_size must be 2048/4096/8192."
3185 " Using 2048\n");
3186 rx_frag_size = 2048;
3187 }
3188
3189 if (num_vfs > 32) {
3190 printk(KERN_WARNING DRV_NAME
3191 " : Module param num_vfs must not be greater than 32."
3192 "Using 32\n");
3193 num_vfs = 32;
3194 }
3195
3196 return pci_register_driver(&be_driver);
3197 }
3198 module_init(be_init_module);
3199
3200 static void __exit be_exit_module(void)
3201 {
3202 pci_unregister_driver(&be_driver);
3203 }
3204 module_exit(be_exit_module);