1 /*
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26
27 MODULE_VERSION(DRV_VER);
28 MODULE_DEVICE_TABLE(pci, be_dev_ids);
29 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
30 MODULE_AUTHOR("Emulex Corporation");
31 MODULE_LICENSE("GPL");
32
33 static unsigned int num_vfs;
34 module_param(num_vfs, uint, S_IRUGO);
35 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
36
37 static ushort rx_frag_size = 2048;
38 module_param(rx_frag_size, ushort, S_IRUGO);
39 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
40
41 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
42 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
44 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
45 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
48 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
49 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
50 { 0 }
51 };
52 MODULE_DEVICE_TABLE(pci, be_dev_ids);
53 /* UE Status Low CSR */
54 static const char * const ue_status_low_desc[] = {
55 "CEV",
56 "CTX",
57 "DBUF",
58 "ERX",
59 "Host",
60 "MPU",
61 "NDMA",
62 "PTC ",
63 "RDMA ",
64 "RXF ",
65 "RXIPS ",
66 "RXULP0 ",
67 "RXULP1 ",
68 "RXULP2 ",
69 "TIM ",
70 "TPOST ",
71 "TPRE ",
72 "TXIPS ",
73 "TXULP0 ",
74 "TXULP1 ",
75 "UC ",
76 "WDMA ",
77 "TXULP2 ",
78 "HOST1 ",
79 "P0_OB_LINK ",
80 "P1_OB_LINK ",
81 "HOST_GPIO ",
82 "MBOX ",
83 "AXGMAC0",
84 "AXGMAC1",
85 "JTAG",
86 "MPU_INTPEND"
87 };
88 /* UE Status High CSR */
89 static const char * const ue_status_hi_desc[] = {
90 "LPCMEMHOST",
91 "MGMT_MAC",
92 "PCS0ONLINE",
93 "MPU_IRAM",
94 "PCS1ONLINE",
95 "PCTL0",
96 "PCTL1",
97 "PMEM",
98 "RR",
99 "TXPB",
100 "RXPP",
101 "XAUI",
102 "TXP",
103 "ARM",
104 "IPC",
105 "HOST2",
106 "HOST3",
107 "HOST4",
108 "HOST5",
109 "HOST6",
110 "HOST7",
111 "HOST8",
112 "HOST9",
113 "NETC",
114 "Unknown",
115 "Unknown",
116 "Unknown",
117 "Unknown",
118 "Unknown",
119 "Unknown",
120 "Unknown",
121 "Unknown"
122 };
123
124
125 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
126 {
127 struct be_dma_mem *mem = &q->dma_mem;
128 if (mem->va) {
129 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
130 mem->dma);
131 mem->va = NULL;
132 }
133 }
134
135 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 u16 len, u16 entry_size)
137 {
138 struct be_dma_mem *mem = &q->dma_mem;
139
140 memset(q, 0, sizeof(*q));
141 q->len = len;
142 q->entry_size = entry_size;
143 mem->size = len * entry_size;
144 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 GFP_KERNEL);
146 if (!mem->va)
147 return -ENOMEM;
148 return 0;
149 }
150
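/* Toggle host interrupts by flipping the HOSTINTR bit in the MEMBAR
* control register in PCI config space; the write is skipped if the bit
* already matches the requested state.
*/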
151 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
152 {
153 u32 reg, enabled;
154
155 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
156 &reg);
157 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
158
159 if (!enabled && enable)
160 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
161 else if (enabled && !enable)
162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
163 else
164 return;
165
166 pci_write_config_dword(adapter->pdev,
167 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
168 }
169
170 static void be_intr_set(struct be_adapter *adapter, bool enable)
171 {
172 int status = 0;
173
174 /* On Lancer, interrupts can't be controlled via this register */
175 if (lancer_chip(adapter))
176 return;
177
178 if (adapter->eeh_error)
179 return;
180
181 status = be_cmd_intr_set(adapter, enable);
182 if (status)
183 be_reg_intr_set(adapter, enable);
184 }
185
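/* Ring the RX queue doorbell with the number of receive buffers just posted */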
186 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
187 {
188 u32 val = 0;
189 val |= qid & DB_RQ_RING_ID_MASK;
190 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
191
192 wmb();
193 iowrite32(val, adapter->db + DB_RQ_OFFSET);
194 }
195
196 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
197 u16 posted)
198 {
199 u32 val = 0;
200 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
201 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
202
203 wmb();
204 iowrite32(val, adapter->db + txo->db_offset);
205 }
206
207 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
208 bool arm, bool clear_int, u16 num_popped)
209 {
210 u32 val = 0;
211 val |= qid & DB_EQ_RING_ID_MASK;
212 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
213 DB_EQ_RING_ID_EXT_MASK_SHIFT);
214
215 if (adapter->eeh_error)
216 return;
217
218 if (arm)
219 val |= 1 << DB_EQ_REARM_SHIFT;
220 if (clear_int)
221 val |= 1 << DB_EQ_CLR_SHIFT;
222 val |= 1 << DB_EQ_EVNT_SHIFT;
223 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
224 iowrite32(val, adapter->db + DB_EQ_OFFSET);
225 }
226
227 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
228 {
229 u32 val = 0;
230 val |= qid & DB_CQ_RING_ID_MASK;
231 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232 DB_CQ_RING_ID_EXT_MASK_SHIFT);
233
234 if (adapter->eeh_error)
235 return;
236
237 if (arm)
238 val |= 1 << DB_CQ_REARM_SHIFT;
239 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
240 iowrite32(val, adapter->db + DB_CQ_OFFSET);
241 }
242
243 static int be_mac_addr_set(struct net_device *netdev, void *p)
244 {
245 struct be_adapter *adapter = netdev_priv(netdev);
246 struct device *dev = &adapter->pdev->dev;
247 struct sockaddr *addr = p;
248 int status;
249 u8 mac[ETH_ALEN];
250 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
251
252 if (!is_valid_ether_addr(addr->sa_data))
253 return -EADDRNOTAVAIL;
254
255 /* Proceed further only if the user-provided MAC is different
256 * from the active MAC
257 */
258 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259 return 0;
260
261 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262 * privilege or if PF did not provision the new MAC address.
263 * On BE3, this cmd will always fail if the VF doesn't have the
264 * FILTMGMT privilege. This failure is OK, only if the PF programmed
265 * the MAC for the VF.
266 */
267 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268 adapter->if_handle, &adapter->pmac_id[0], 0);
269 if (!status) {
270 curr_pmac_id = adapter->pmac_id[0];
271
272 /* Delete the old programmed MAC. This call may fail if the
273 * old MAC was already deleted by the PF driver.
274 */
275 if (adapter->pmac_id[0] != old_pmac_id)
276 be_cmd_pmac_del(adapter, adapter->if_handle,
277 old_pmac_id, 0);
278 }
279
280 /* Decide if the new MAC is successfully activated only after
281 * querying the FW
282 */
283 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284 adapter->if_handle, true, 0);
285 if (status)
286 goto err;
287
288 /* The MAC change did not happen, either due to lack of privilege
289 * or because the PF didn't pre-provision the MAC.
290 */
291 if (!ether_addr_equal(addr->sa_data, mac)) {
292 status = -EPERM;
293 goto err;
294 }
295
296 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
297 dev_info(dev, "MAC address changed to %pM\n", mac);
298 return 0;
299 err:
300 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
301 return status;
302 }
303
304 /* BE2 supports only v0 cmd */
305 static void *hw_stats_from_cmd(struct be_adapter *adapter)
306 {
307 if (BE2_chip(adapter)) {
308 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310 return &cmd->hw_stats;
311 } else if (BE3_chip(adapter)) {
312 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314 return &cmd->hw_stats;
315 } else {
316 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318 return &cmd->hw_stats;
319 }
320 }
321
322 /* BE2 supports only v0 cmd */
323 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324 {
325 if (BE2_chip(adapter)) {
326 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328 return &hw_stats->erx;
329 } else if (BE3_chip(adapter)) {
330 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332 return &hw_stats->erx;
333 } else {
334 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336 return &hw_stats->erx;
337 }
338 }
339
340 static void populate_be_v0_stats(struct be_adapter *adapter)
341 {
342 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
345 struct be_port_rxf_stats_v0 *port_stats =
346 &rxf_stats->port[adapter->port_num];
347 struct be_drv_stats *drvs = &adapter->drv_stats;
348
349 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
350 drvs->rx_pause_frames = port_stats->rx_pause_frames;
351 drvs->rx_crc_errors = port_stats->rx_crc_errors;
352 drvs->rx_control_frames = port_stats->rx_control_frames;
353 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
364 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
365 drvs->rx_dropped_header_too_small =
366 port_stats->rx_dropped_header_too_small;
367 drvs->rx_address_filtered =
368 port_stats->rx_address_filtered +
369 port_stats->rx_vlan_filtered;
370 drvs->rx_alignment_symbol_errors =
371 port_stats->rx_alignment_symbol_errors;
372
373 drvs->tx_pauseframes = port_stats->tx_pauseframes;
374 drvs->tx_controlframes = port_stats->tx_controlframes;
375
376 if (adapter->port_num)
377 drvs->jabber_events = rxf_stats->port1_jabber_events;
378 else
379 drvs->jabber_events = rxf_stats->port0_jabber_events;
380 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
381 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
382 drvs->forwarded_packets = rxf_stats->forwarded_packets;
383 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
384 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
386 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387 }
388
389 static void populate_be_v1_stats(struct be_adapter *adapter)
390 {
391 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
394 struct be_port_rxf_stats_v1 *port_stats =
395 &rxf_stats->port[adapter->port_num];
396 struct be_drv_stats *drvs = &adapter->drv_stats;
397
398 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
399 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
401 drvs->rx_pause_frames = port_stats->rx_pause_frames;
402 drvs->rx_crc_errors = port_stats->rx_crc_errors;
403 drvs->rx_control_frames = port_stats->rx_control_frames;
404 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414 drvs->rx_dropped_header_too_small =
415 port_stats->rx_dropped_header_too_small;
416 drvs->rx_input_fifo_overflow_drop =
417 port_stats->rx_input_fifo_overflow_drop;
418 drvs->rx_address_filtered = port_stats->rx_address_filtered;
419 drvs->rx_alignment_symbol_errors =
420 port_stats->rx_alignment_symbol_errors;
421 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
422 drvs->tx_pauseframes = port_stats->tx_pauseframes;
423 drvs->tx_controlframes = port_stats->tx_controlframes;
424 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
425 drvs->jabber_events = port_stats->jabber_events;
426 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
427 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
428 drvs->forwarded_packets = rxf_stats->forwarded_packets;
429 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
430 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
432 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433 }
434
435 static void populate_be_v2_stats(struct be_adapter *adapter)
436 {
437 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440 struct be_port_rxf_stats_v2 *port_stats =
441 &rxf_stats->port[adapter->port_num];
442 struct be_drv_stats *drvs = &adapter->drv_stats;
443
444 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447 drvs->rx_pause_frames = port_stats->rx_pause_frames;
448 drvs->rx_crc_errors = port_stats->rx_crc_errors;
449 drvs->rx_control_frames = port_stats->rx_control_frames;
450 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460 drvs->rx_dropped_header_too_small =
461 port_stats->rx_dropped_header_too_small;
462 drvs->rx_input_fifo_overflow_drop =
463 port_stats->rx_input_fifo_overflow_drop;
464 drvs->rx_address_filtered = port_stats->rx_address_filtered;
465 drvs->rx_alignment_symbol_errors =
466 port_stats->rx_alignment_symbol_errors;
467 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468 drvs->tx_pauseframes = port_stats->tx_pauseframes;
469 drvs->tx_controlframes = port_stats->tx_controlframes;
470 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471 drvs->jabber_events = port_stats->jabber_events;
472 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474 drvs->forwarded_packets = rxf_stats->forwarded_packets;
475 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
479 if (be_roce_supported(adapter)) {
480 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482 drvs->rx_roce_frames = port_stats->roce_frames_received;
483 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484 drvs->roce_drops_payload_len =
485 port_stats->roce_drops_payload_len;
486 }
487 }
488
489 static void populate_lancer_stats(struct be_adapter *adapter)
490 {
491
492 struct be_drv_stats *drvs = &adapter->drv_stats;
493 struct lancer_pport_stats *pport_stats =
494 pport_stats_from_cmd(adapter);
495
496 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
497 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
498 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
499 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
500 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
501 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
502 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
503 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
504 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
505 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
506 drvs->rx_dropped_tcp_length =
507 pport_stats->rx_dropped_invalid_tcp_length;
508 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
509 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
510 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
511 drvs->rx_dropped_header_too_small =
512 pport_stats->rx_dropped_header_too_small;
513 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
514 drvs->rx_address_filtered =
515 pport_stats->rx_address_filtered +
516 pport_stats->rx_vlan_filtered;
517 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
518 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
519 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
520 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
521 drvs->jabber_events = pport_stats->rx_jabbers;
522 drvs->forwarded_packets = pport_stats->num_forwards_lo;
523 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
524 drvs->rx_drops_too_many_frags =
525 pport_stats->rx_drops_too_many_frags_lo;
526 }
527
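/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator:
* the low 16 bits mirror the latest HW reading and the high bits absorb
* each detected wrap-around.
*/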
528 static void accumulate_16bit_val(u32 *acc, u16 val)
529 {
530 #define lo(x) (x & 0xFFFF)
531 #define hi(x) (x & 0xFFFF0000)
532 bool wrapped = val < lo(*acc);
533 u32 newacc = hi(*acc) + val;
534
535 if (wrapped)
536 newacc += 65536;
537 ACCESS_ONCE(*acc) = newacc;
538 }
539
540 static void populate_erx_stats(struct be_adapter *adapter,
541 struct be_rx_obj *rxo,
542 u32 erx_stat)
543 {
544 if (!BEx_chip(adapter))
545 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
546 else
547 /* the erx HW counter below wraps around after 65535;
548 * the driver accumulates it into a 32-bit value
549 */
550 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
551 (u16)erx_stat);
552 }
553
554 void be_parse_stats(struct be_adapter *adapter)
555 {
556 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
557 struct be_rx_obj *rxo;
558 int i;
559 u32 erx_stat;
560
561 if (lancer_chip(adapter)) {
562 populate_lancer_stats(adapter);
563 } else {
564 if (BE2_chip(adapter))
565 populate_be_v0_stats(adapter);
566 else if (BE3_chip(adapter))
567 /* for BE3 */
568 populate_be_v1_stats(adapter);
569 else
570 populate_be_v2_stats(adapter);
571
572 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
573 for_all_rx_queues(adapter, rxo, i) {
574 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
575 populate_erx_stats(adapter, rxo, erx_stat);
576 }
577 }
578 }
579
580 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
581 struct rtnl_link_stats64 *stats)
582 {
583 struct be_adapter *adapter = netdev_priv(netdev);
584 struct be_drv_stats *drvs = &adapter->drv_stats;
585 struct be_rx_obj *rxo;
586 struct be_tx_obj *txo;
587 u64 pkts, bytes;
588 unsigned int start;
589 int i;
590
591 for_all_rx_queues(adapter, rxo, i) {
592 const struct be_rx_stats *rx_stats = rx_stats(rxo);
593 do {
594 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
595 pkts = rx_stats(rxo)->rx_pkts;
596 bytes = rx_stats(rxo)->rx_bytes;
597 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
598 stats->rx_packets += pkts;
599 stats->rx_bytes += bytes;
600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
601 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
602 rx_stats(rxo)->rx_drops_no_frags;
603 }
604
605 for_all_tx_queues(adapter, txo, i) {
606 const struct be_tx_stats *tx_stats = tx_stats(txo);
607 do {
608 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
609 pkts = tx_stats(txo)->tx_pkts;
610 bytes = tx_stats(txo)->tx_bytes;
611 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
612 stats->tx_packets += pkts;
613 stats->tx_bytes += bytes;
614 }
615
616 /* bad pkts received */
617 stats->rx_errors = drvs->rx_crc_errors +
618 drvs->rx_alignment_symbol_errors +
619 drvs->rx_in_range_errors +
620 drvs->rx_out_range_errors +
621 drvs->rx_frame_too_long +
622 drvs->rx_dropped_too_small +
623 drvs->rx_dropped_too_short +
624 drvs->rx_dropped_header_too_small +
625 drvs->rx_dropped_tcp_length +
626 drvs->rx_dropped_runt;
627
628 /* detailed rx errors */
629 stats->rx_length_errors = drvs->rx_in_range_errors +
630 drvs->rx_out_range_errors +
631 drvs->rx_frame_too_long;
632
633 stats->rx_crc_errors = drvs->rx_crc_errors;
634
635 /* frame alignment errors */
636 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
637
638 /* receiver fifo overrun */
639 /* drops_no_pbuf is not per i/f, it's per BE card */
640 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
641 drvs->rx_input_fifo_overflow_drop +
642 drvs->rx_drops_no_pbuf;
643 return stats;
644 }
645
646 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
647 {
648 struct net_device *netdev = adapter->netdev;
649
650 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
651 netif_carrier_off(netdev);
652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
653 }
654
655 if (link_status)
656 netif_carrier_on(netdev);
657 else
658 netif_carrier_off(netdev);
659 }
660
661 static void be_tx_stats_update(struct be_tx_obj *txo,
662 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
663 {
664 struct be_tx_stats *stats = tx_stats(txo);
665
666 u64_stats_update_begin(&stats->sync);
667 stats->tx_reqs++;
668 stats->tx_wrbs += wrb_cnt;
669 stats->tx_bytes += copied;
670 stats->tx_pkts += (gso_segs ? gso_segs : 1);
671 if (stopped)
672 stats->tx_stops++;
673 u64_stats_update_end(&stats->sync);
674 }
675
676 /* Determine number of WRB entries needed to xmit data in an skb */
677 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
678 bool *dummy)
679 {
680 int cnt = (skb->len > skb->data_len);
681
682 cnt += skb_shinfo(skb)->nr_frags;
683
684 /* to account for hdr wrb */
685 cnt++;
686 if (lancer_chip(adapter) || !(cnt & 1)) {
687 *dummy = false;
688 } else {
689 /* add a dummy to make it an even num */
690 cnt++;
691 *dummy = true;
692 }
693 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
694 return cnt;
695 }
696
697 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
698 {
699 wrb->frag_pa_hi = upper_32_bits(addr);
700 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
701 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
702 wrb->rsvd0 = 0;
703 }
704
705 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
706 struct sk_buff *skb)
707 {
708 u8 vlan_prio;
709 u16 vlan_tag;
710
711 vlan_tag = vlan_tx_tag_get(skb);
712 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
713 /* If vlan priority provided by OS is NOT in available bmap */
714 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
715 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
716 adapter->recommended_prio;
717
718 return vlan_tag;
719 }
720
721 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
722 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
723 {
724 u16 vlan_tag;
725
726 memset(hdr, 0, sizeof(*hdr));
727
728 AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
729
730 if (skb_is_gso(skb)) {
731 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
732 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
733 hdr, skb_shinfo(skb)->gso_size);
734 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
735 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
736 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
737 if (is_tcp_pkt(skb))
738 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
739 else if (is_udp_pkt(skb))
740 AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
741 }
742
743 if (vlan_tx_tag_present(skb)) {
744 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
745 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
746 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
747 }
748
749 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
750 AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
751 AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
752 AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
753 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
754 }
755
756 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
757 bool unmap_single)
758 {
759 dma_addr_t dma;
760
761 be_dws_le_to_cpu(wrb, sizeof(*wrb));
762
763 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
764 if (wrb->frag_len) {
765 if (unmap_single)
766 dma_unmap_single(dev, dma, wrb->frag_len,
767 DMA_TO_DEVICE);
768 else
769 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
770 }
771 }
772
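/* DMA-map the skb head and each frag and fill one WRB per mapping (plus an
* optional dummy WRB); on a mapping failure, rewind the queue head and unmap
* everything mapped so far. Returns the number of bytes mapped, or 0 on error.
*/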
773 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
774 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
775 bool skip_hw_vlan)
776 {
777 dma_addr_t busaddr;
778 int i, copied = 0;
779 struct device *dev = &adapter->pdev->dev;
780 struct sk_buff *first_skb = skb;
781 struct be_eth_wrb *wrb;
782 struct be_eth_hdr_wrb *hdr;
783 bool map_single = false;
784 u16 map_head;
785
786 hdr = queue_head_node(txq);
787 queue_head_inc(txq);
788 map_head = txq->head;
789
790 if (skb->len > skb->data_len) {
791 int len = skb_headlen(skb);
792 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
793 if (dma_mapping_error(dev, busaddr))
794 goto dma_err;
795 map_single = true;
796 wrb = queue_head_node(txq);
797 wrb_fill(wrb, busaddr, len);
798 be_dws_cpu_to_le(wrb, sizeof(*wrb));
799 queue_head_inc(txq);
800 copied += len;
801 }
802
803 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
804 const struct skb_frag_struct *frag =
805 &skb_shinfo(skb)->frags[i];
806 busaddr = skb_frag_dma_map(dev, frag, 0,
807 skb_frag_size(frag), DMA_TO_DEVICE);
808 if (dma_mapping_error(dev, busaddr))
809 goto dma_err;
810 wrb = queue_head_node(txq);
811 wrb_fill(wrb, busaddr, skb_frag_size(frag));
812 be_dws_cpu_to_le(wrb, sizeof(*wrb));
813 queue_head_inc(txq);
814 copied += skb_frag_size(frag);
815 }
816
817 if (dummy_wrb) {
818 wrb = queue_head_node(txq);
819 wrb_fill(wrb, 0, 0);
820 be_dws_cpu_to_le(wrb, sizeof(*wrb));
821 queue_head_inc(txq);
822 }
823
824 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
825 be_dws_cpu_to_le(hdr, sizeof(*hdr));
826
827 return copied;
828 dma_err:
829 txq->head = map_head;
830 while (copied) {
831 wrb = queue_head_node(txq);
832 unmap_tx_frag(dev, wrb, map_single);
833 map_single = false;
834 copied -= wrb->frag_len;
835 queue_head_inc(txq);
836 }
837 return 0;
838 }
839
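/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
* packet data itself so that HW VLAN tag insertion can be skipped.
*/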
840 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
841 struct sk_buff *skb,
842 bool *skip_hw_vlan)
843 {
844 u16 vlan_tag = 0;
845
846 skb = skb_share_check(skb, GFP_ATOMIC);
847 if (unlikely(!skb))
848 return skb;
849
850 if (vlan_tx_tag_present(skb))
851 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
852
853 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
854 if (!vlan_tag)
855 vlan_tag = adapter->pvid;
856 /* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
857 * skip VLAN insertion
858 */
859 if (skip_hw_vlan)
860 *skip_hw_vlan = true;
861 }
862
863 if (vlan_tag) {
864 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
865 if (unlikely(!skb))
866 return skb;
867 skb->vlan_tci = 0;
868 }
869
870 /* Insert the outer VLAN, if any */
871 if (adapter->qnq_vid) {
872 vlan_tag = adapter->qnq_vid;
873 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
874 if (unlikely(!skb))
875 return skb;
876 if (skip_hw_vlan)
877 *skip_hw_vlan = true;
878 }
879
880 return skb;
881 }
882
883 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
884 {
885 struct ethhdr *eh = (struct ethhdr *)skb->data;
886 u16 offset = ETH_HLEN;
887
888 if (eh->h_proto == htons(ETH_P_IPV6)) {
889 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
890
891 offset += sizeof(struct ipv6hdr);
892 if (ip6h->nexthdr != NEXTHDR_TCP &&
893 ip6h->nexthdr != NEXTHDR_UDP) {
894 struct ipv6_opt_hdr *ehdr =
895 (struct ipv6_opt_hdr *) (skb->data + offset);
896
897 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
898 if (ehdr->hdrlen == 0xff)
899 return true;
900 }
901 }
902 return false;
903 }
904
905 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
906 {
907 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
908 }
909
910 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
911 struct sk_buff *skb)
912 {
913 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
914 }
915
916 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
917 struct sk_buff *skb,
918 bool *skip_hw_vlan)
919 {
920 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
921 unsigned int eth_hdr_len;
922 struct iphdr *ip;
923
924 /* For padded packets, BE HW modifies tot_len field in IP header
925 * incorrectly when VLAN tag is inserted by HW.
926 * For padded packets, Lancer computes incorrect checksum.
927 */
928 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
929 VLAN_ETH_HLEN : ETH_HLEN;
930 if (skb->len <= 60 &&
931 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
932 is_ipv4_pkt(skb)) {
933 ip = (struct iphdr *)ip_hdr(skb);
934 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
935 }
936
937 /* If vlan tag is already inlined in the packet, skip HW VLAN
938 * tagging in pvid-tagging mode
939 */
940 if (be_pvid_tagging_enabled(adapter) &&
941 veh->h_vlan_proto == htons(ETH_P_8021Q))
942 *skip_hw_vlan = true;
943
944 /* HW has a bug wherein it will calculate CSUM for VLAN
945 * pkts even when CSUM offload is not requested.
946 * Manually insert the VLAN in the pkt.
947 */
948 if (skb->ip_summed != CHECKSUM_PARTIAL &&
949 vlan_tx_tag_present(skb)) {
950 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
951 if (unlikely(!skb))
952 goto err;
953 }
954
955 /* HW may lockup when VLAN HW tagging is requested on
956 * certain ipv6 packets. Drop such pkts if the HW workaround to
957 * skip HW tagging is not enabled by FW.
958 */
959 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
960 (adapter->pvid || adapter->qnq_vid) &&
961 !qnq_async_evt_rcvd(adapter)))
962 goto tx_drop;
963
964 /* Manual VLAN tag insertion to prevent:
965 * ASIC lockup when the ASIC inserts VLAN tag into
966 * certain ipv6 packets. Insert VLAN tags in driver,
967 * and set event, completion, vlan bits accordingly
968 * in the Tx WRB.
969 */
970 if (be_ipv6_tx_stall_chk(adapter, skb) &&
971 be_vlan_tag_tx_chk(adapter, skb)) {
972 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
973 if (unlikely(!skb))
974 goto err;
975 }
976
977 return skb;
978 tx_drop:
979 dev_kfree_skb_any(skb);
980 err:
981 return NULL;
982 }
983
984 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
985 struct sk_buff *skb,
986 bool *skip_hw_vlan)
987 {
988 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
989 * less may cause a transmit stall on that port. So the work-around is
990 * to pad short packets (<= 32 bytes) to a 36-byte length.
991 */
992 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
993 if (skb_padto(skb, 36))
994 return NULL;
995 skb->len = 36;
996 }
997
998 if (BEx_chip(adapter) || lancer_chip(adapter)) {
999 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1000 if (!skb)
1001 return NULL;
1002 }
1003
1004 return skb;
1005 }
1006
1007 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1008 {
1009 struct be_adapter *adapter = netdev_priv(netdev);
1010 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1011 struct be_queue_info *txq = &txo->q;
1012 bool dummy_wrb, stopped = false;
1013 u32 wrb_cnt = 0, copied = 0;
1014 bool skip_hw_vlan = false;
1015 u32 start = txq->head;
1016
1017 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1018 if (!skb) {
1019 tx_stats(txo)->tx_drv_drops++;
1020 return NETDEV_TX_OK;
1021 }
1022
1023 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1024
1025 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1026 skip_hw_vlan);
1027 if (copied) {
1028 int gso_segs = skb_shinfo(skb)->gso_segs;
1029
1030 /* record the sent skb in the sent_skb table */
1031 BUG_ON(txo->sent_skb_list[start]);
1032 txo->sent_skb_list[start] = skb;
1033
1034 /* Ensure txq has space for the next skb; else stop the queue
1035 * *BEFORE* ringing the tx doorbell, so that we serialize the
1036 * tx compls of the current transmit which'll wake up the queue
1037 */
1038 atomic_add(wrb_cnt, &txq->used);
1039 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1040 txq->len) {
1041 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1042 stopped = true;
1043 }
1044
1045 be_txq_notify(adapter, txo, wrb_cnt);
1046
1047 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1048 } else {
1049 txq->head = start;
1050 tx_stats(txo)->tx_drv_drops++;
1051 dev_kfree_skb_any(skb);
1052 }
1053 return NETDEV_TX_OK;
1054 }
1055
1056 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1057 {
1058 struct be_adapter *adapter = netdev_priv(netdev);
1059 if (new_mtu < BE_MIN_MTU ||
1060 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
1061 (ETH_HLEN + ETH_FCS_LEN))) {
1062 dev_info(&adapter->pdev->dev,
1063 "MTU must be between %d and %d bytes\n",
1064 BE_MIN_MTU,
1065 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1066 return -EINVAL;
1067 }
1068 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1069 netdev->mtu, new_mtu);
1070 netdev->mtu = new_mtu;
1071 return 0;
1072 }
1073
1074 /*
1075 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1076 * If the user configures more, place BE in vlan promiscuous mode.
1077 */
1078 static int be_vid_config(struct be_adapter *adapter)
1079 {
1080 u16 vids[BE_NUM_VLANS_SUPPORTED];
1081 u16 num = 0, i;
1082 int status = 0;
1083
1084 /* No need to further configure vids if in promiscuous mode */
1085 if (adapter->promiscuous)
1086 return 0;
1087
1088 if (adapter->vlans_added > be_max_vlans(adapter))
1089 goto set_vlan_promisc;
1090
1091 /* Construct VLAN Table to give to HW */
1092 for (i = 0; i < VLAN_N_VID; i++)
1093 if (adapter->vlan_tag[i])
1094 vids[num++] = cpu_to_le16(i);
1095
1096 status = be_cmd_vlan_config(adapter, adapter->if_handle,
1097 vids, num, 0);
1098
1099 if (status) {
1100 /* Set to VLAN promisc mode as setting VLAN filter failed */
1101 if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
1102 goto set_vlan_promisc;
1103 dev_err(&adapter->pdev->dev,
1104 "Setting HW VLAN filtering failed.\n");
1105 } else {
1106 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1107 /* hw VLAN filtering re-enabled. */
1108 status = be_cmd_rx_filter(adapter,
1109 BE_FLAGS_VLAN_PROMISC, OFF);
1110 if (!status) {
1111 dev_info(&adapter->pdev->dev,
1112 "Disabling VLAN Promiscuous mode.\n");
1113 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1114 }
1115 }
1116 }
1117
1118 return status;
1119
1120 set_vlan_promisc:
1121 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1122 return 0;
1123
1124 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1125 if (!status) {
1126 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1127 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1128 } else
1129 dev_err(&adapter->pdev->dev,
1130 "Failed to enable VLAN Promiscuous mode.\n");
1131 return status;
1132 }
1133
1134 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1135 {
1136 struct be_adapter *adapter = netdev_priv(netdev);
1137 int status = 0;
1138
1139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0)
1141 return status;
1142
1143 if (adapter->vlan_tag[vid])
1144 return status;
1145
1146 adapter->vlan_tag[vid] = 1;
1147 adapter->vlans_added++;
1148
1149 status = be_vid_config(adapter);
1150 if (status) {
1151 adapter->vlans_added--;
1152 adapter->vlan_tag[vid] = 0;
1153 }
1154
1155 return status;
1156 }
1157
1158 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1159 {
1160 struct be_adapter *adapter = netdev_priv(netdev);
1161 int status = 0;
1162
1163 /* Packets with VID 0 are always received by Lancer by default */
1164 if (lancer_chip(adapter) && vid == 0)
1165 goto ret;
1166
1167 adapter->vlan_tag[vid] = 0;
1168 status = be_vid_config(adapter);
1169 if (!status)
1170 adapter->vlans_added--;
1171 else
1172 adapter->vlan_tag[vid] = 1;
1173 ret:
1174 return status;
1175 }
1176
1177 static void be_clear_promisc(struct be_adapter *adapter)
1178 {
1179 adapter->promiscuous = false;
1180 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1181
1182 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1183 }
1184
1185 static void be_set_rx_mode(struct net_device *netdev)
1186 {
1187 struct be_adapter *adapter = netdev_priv(netdev);
1188 int status;
1189
1190 if (netdev->flags & IFF_PROMISC) {
1191 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1192 adapter->promiscuous = true;
1193 goto done;
1194 }
1195
1196 /* BE was previously in promiscuous mode; disable it */
1197 if (adapter->promiscuous) {
1198 be_clear_promisc(adapter);
1199 if (adapter->vlans_added)
1200 be_vid_config(adapter);
1201 }
1202
1203 /* Enable multicast promisc if num configured exceeds what we support */
1204 if (netdev->flags & IFF_ALLMULTI ||
1205 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1206 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1207 goto done;
1208 }
1209
1210 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1211 struct netdev_hw_addr *ha;
1212 int i = 1; /* First slot is claimed by the Primary MAC */
1213
1214 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1215 be_cmd_pmac_del(adapter, adapter->if_handle,
1216 adapter->pmac_id[i], 0);
1217 }
1218
1219 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1220 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1221 adapter->promiscuous = true;
1222 goto done;
1223 }
1224
1225 netdev_for_each_uc_addr(ha, adapter->netdev) {
1226 adapter->uc_macs++; /* First slot is for Primary MAC */
1227 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1228 adapter->if_handle,
1229 &adapter->pmac_id[adapter->uc_macs], 0);
1230 }
1231 }
1232
1233 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1234
1235 /* Set to MCAST promisc mode if setting MULTICAST address fails */
1236 if (status) {
1237 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1238 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1239 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1240 }
1241 done:
1242 return;
1243 }
1244
1245 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1246 {
1247 struct be_adapter *adapter = netdev_priv(netdev);
1248 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1249 int status;
1250
1251 if (!sriov_enabled(adapter))
1252 return -EPERM;
1253
1254 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1255 return -EINVAL;
1256
1257 if (BEx_chip(adapter)) {
1258 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1259 vf + 1);
1260
1261 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1262 &vf_cfg->pmac_id, vf + 1);
1263 } else {
1264 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1265 vf + 1);
1266 }
1267
1268 if (status)
1269 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
1270 mac, vf);
1271 else
1272 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1273
1274 return status;
1275 }
1276
1277 static int be_get_vf_config(struct net_device *netdev, int vf,
1278 struct ifla_vf_info *vi)
1279 {
1280 struct be_adapter *adapter = netdev_priv(netdev);
1281 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1282
1283 if (!sriov_enabled(adapter))
1284 return -EPERM;
1285
1286 if (vf >= adapter->num_vfs)
1287 return -EINVAL;
1288
1289 vi->vf = vf;
1290 vi->tx_rate = vf_cfg->tx_rate;
1291 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1292 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1293 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1294 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1295
1296 return 0;
1297 }
1298
1299 static int be_set_vf_vlan(struct net_device *netdev,
1300 int vf, u16 vlan, u8 qos)
1301 {
1302 struct be_adapter *adapter = netdev_priv(netdev);
1303 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1304 int status = 0;
1305
1306 if (!sriov_enabled(adapter))
1307 return -EPERM;
1308
1309 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1310 return -EINVAL;
1311
1312 if (vlan || qos) {
1313 vlan |= qos << VLAN_PRIO_SHIFT;
1314 if (vf_cfg->vlan_tag != vlan)
1315 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1316 vf_cfg->if_handle, 0);
1317 } else {
1318 /* Reset Transparent Vlan Tagging. */
1319 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1320 vf + 1, vf_cfg->if_handle, 0);
1321 }
1322
1323 if (!status)
1324 vf_cfg->vlan_tag = vlan;
1325 else
1326 dev_info(&adapter->pdev->dev,
1327 "VLAN %d config on VF %d failed\n", vlan, vf);
1328 return status;
1329 }
1330
1331 static int be_set_vf_tx_rate(struct net_device *netdev,
1332 int vf, int rate)
1333 {
1334 struct be_adapter *adapter = netdev_priv(netdev);
1335 int status = 0;
1336
1337 if (!sriov_enabled(adapter))
1338 return -EPERM;
1339
1340 if (vf >= adapter->num_vfs)
1341 return -EINVAL;
1342
1343 if (rate < 100 || rate > 10000) {
1344 dev_err(&adapter->pdev->dev,
1345 "tx rate must be between 100 and 10000 Mbps\n");
1346 return -EINVAL;
1347 }
1348
1349 status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
1350 if (status)
1351 dev_err(&adapter->pdev->dev,
1352 "tx rate %d on VF %d failed\n", rate, vf);
1353 else
1354 adapter->vf_cfg[vf].tx_rate = rate;
1355 return status;
1356 }
1357 static int be_set_vf_link_state(struct net_device *netdev, int vf,
1358 int link_state)
1359 {
1360 struct be_adapter *adapter = netdev_priv(netdev);
1361 int status;
1362
1363 if (!sriov_enabled(adapter))
1364 return -EPERM;
1365
1366 if (vf >= adapter->num_vfs)
1367 return -EINVAL;
1368
1369 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1370 if (!status)
1371 adapter->vf_cfg[vf].plink_tracking = link_state;
1372
1373 return status;
1374 }
1375
1376 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1377 ulong now)
1378 {
1379 aic->rx_pkts_prev = rx_pkts;
1380 aic->tx_reqs_prev = tx_pkts;
1381 aic->jiffies = now;
1382 }
1383
1384 static void be_eqd_update(struct be_adapter *adapter)
1385 {
1386 struct be_set_eqd set_eqd[MAX_EVT_QS];
1387 int eqd, i, num = 0, start;
1388 struct be_aic_obj *aic;
1389 struct be_eq_obj *eqo;
1390 struct be_rx_obj *rxo;
1391 struct be_tx_obj *txo;
1392 u64 rx_pkts, tx_pkts;
1393 ulong now;
1394 u32 pps, delta;
1395
1396 for_all_evt_queues(adapter, eqo, i) {
1397 aic = &adapter->aic_obj[eqo->idx];
1398 if (!aic->enable) {
1399 if (aic->jiffies)
1400 aic->jiffies = 0;
1401 eqd = aic->et_eqd;
1402 goto modify_eqd;
1403 }
1404
1405 rxo = &adapter->rx_obj[eqo->idx];
1406 do {
1407 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1408 rx_pkts = rxo->stats.rx_pkts;
1409 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1410
1411 txo = &adapter->tx_obj[eqo->idx];
1412 do {
1413 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1414 tx_pkts = txo->stats.tx_reqs;
1415 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1416
1417
1418 /* Skip if wrapped around or this is the first calculation */
1419 now = jiffies;
1420 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1421 rx_pkts < aic->rx_pkts_prev ||
1422 tx_pkts < aic->tx_reqs_prev) {
1423 be_aic_update(aic, rx_pkts, tx_pkts, now);
1424 continue;
1425 }
1426
1427 delta = jiffies_to_msecs(now - aic->jiffies);
1428 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1429 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1430 eqd = (pps / 15000) << 2;
1431
1432 if (eqd < 8)
1433 eqd = 0;
1434 eqd = min_t(u32, eqd, aic->max_eqd);
1435 eqd = max_t(u32, eqd, aic->min_eqd);
1436
1437 be_aic_update(aic, rx_pkts, tx_pkts, now);
1438 modify_eqd:
1439 if (eqd != aic->prev_eqd) {
1440 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1441 set_eqd[num].eq_id = eqo->q.id;
1442 aic->prev_eqd = eqd;
1443 num++;
1444 }
1445 }
1446
1447 if (num)
1448 be_cmd_modify_eqd(adapter, set_eqd, num);
1449 }
1450
1451 static void be_rx_stats_update(struct be_rx_obj *rxo,
1452 struct be_rx_compl_info *rxcp)
1453 {
1454 struct be_rx_stats *stats = rx_stats(rxo);
1455
1456 u64_stats_update_begin(&stats->sync);
1457 stats->rx_compl++;
1458 stats->rx_bytes += rxcp->pkt_size;
1459 stats->rx_pkts++;
1460 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1461 stats->rx_mcast_pkts++;
1462 if (rxcp->err)
1463 stats->rx_compl_err++;
1464 u64_stats_update_end(&stats->sync);
1465 }
1466
1467 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1468 {
1469 /* L4 checksum is not reliable for non TCP/UDP packets.
1470 * Also ignore ipcksm for ipv6 pkts */
1471 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1472 (rxcp->ip_csum || rxcp->ipv6);
1473 }
1474
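/* Pop the page_info at the RXQ tail: unmap the backing page on its last
* fragment, otherwise just sync this fragment for CPU access.
*/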
1475 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1476 {
1477 struct be_adapter *adapter = rxo->adapter;
1478 struct be_rx_page_info *rx_page_info;
1479 struct be_queue_info *rxq = &rxo->q;
1480 u16 frag_idx = rxq->tail;
1481
1482 rx_page_info = &rxo->page_info_tbl[frag_idx];
1483 BUG_ON(!rx_page_info->page);
1484
1485 if (rx_page_info->last_frag) {
1486 dma_unmap_page(&adapter->pdev->dev,
1487 dma_unmap_addr(rx_page_info, bus),
1488 adapter->big_page_size, DMA_FROM_DEVICE);
1489 rx_page_info->last_frag = false;
1490 } else {
1491 dma_sync_single_for_cpu(&adapter->pdev->dev,
1492 dma_unmap_addr(rx_page_info, bus),
1493 rx_frag_size, DMA_FROM_DEVICE);
1494 }
1495
1496 queue_tail_inc(rxq);
1497 atomic_dec(&rxq->used);
1498 return rx_page_info;
1499 }
1500
1501 /* Throw away the data in the Rx completion */
1502 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1503 struct be_rx_compl_info *rxcp)
1504 {
1505 struct be_rx_page_info *page_info;
1506 u16 i, num_rcvd = rxcp->num_rcvd;
1507
1508 for (i = 0; i < num_rcvd; i++) {
1509 page_info = get_rx_page_info(rxo);
1510 put_page(page_info->page);
1511 memset(page_info, 0, sizeof(*page_info));
1512 }
1513 }
1514
1515 /*
1516 * skb_fill_rx_data forms a complete skb for an ether frame
1517 * indicated by rxcp.
1518 */
1519 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1520 struct be_rx_compl_info *rxcp)
1521 {
1522 struct be_rx_page_info *page_info;
1523 u16 i, j;
1524 u16 hdr_len, curr_frag_len, remaining;
1525 u8 *start;
1526
1527 page_info = get_rx_page_info(rxo);
1528 start = page_address(page_info->page) + page_info->page_offset;
1529 prefetch(start);
1530
1531 /* Copy data in the first descriptor of this completion */
1532 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1533
1534 skb->len = curr_frag_len;
1535 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1536 memcpy(skb->data, start, curr_frag_len);
1537 /* Complete packet has now been moved to data */
1538 put_page(page_info->page);
1539 skb->data_len = 0;
1540 skb->tail += curr_frag_len;
1541 } else {
1542 hdr_len = ETH_HLEN;
1543 memcpy(skb->data, start, hdr_len);
1544 skb_shinfo(skb)->nr_frags = 1;
1545 skb_frag_set_page(skb, 0, page_info->page);
1546 skb_shinfo(skb)->frags[0].page_offset =
1547 page_info->page_offset + hdr_len;
1548 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1549 skb->data_len = curr_frag_len - hdr_len;
1550 skb->truesize += rx_frag_size;
1551 skb->tail += hdr_len;
1552 }
1553 page_info->page = NULL;
1554
1555 if (rxcp->pkt_size <= rx_frag_size) {
1556 BUG_ON(rxcp->num_rcvd != 1);
1557 return;
1558 }
1559
1560 /* More frags present for this completion */
1561 remaining = rxcp->pkt_size - curr_frag_len;
1562 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1563 page_info = get_rx_page_info(rxo);
1564 curr_frag_len = min(remaining, rx_frag_size);
1565
1566 /* Coalesce all frags from the same physical page in one slot */
1567 if (page_info->page_offset == 0) {
1568 /* Fresh page */
1569 j++;
1570 skb_frag_set_page(skb, j, page_info->page);
1571 skb_shinfo(skb)->frags[j].page_offset =
1572 page_info->page_offset;
1573 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1574 skb_shinfo(skb)->nr_frags++;
1575 } else {
1576 put_page(page_info->page);
1577 }
1578
1579 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1580 skb->len += curr_frag_len;
1581 skb->data_len += curr_frag_len;
1582 skb->truesize += rx_frag_size;
1583 remaining -= curr_frag_len;
1584 page_info->page = NULL;
1585 }
1586 BUG_ON(j > MAX_SKB_FRAGS);
1587 }
1588
1589 /* Process the RX completion indicated by rxcp when GRO is disabled */
1590 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1591 struct be_rx_compl_info *rxcp)
1592 {
1593 struct be_adapter *adapter = rxo->adapter;
1594 struct net_device *netdev = adapter->netdev;
1595 struct sk_buff *skb;
1596
1597 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1598 if (unlikely(!skb)) {
1599 rx_stats(rxo)->rx_drops_no_skbs++;
1600 be_rx_compl_discard(rxo, rxcp);
1601 return;
1602 }
1603
1604 skb_fill_rx_data(rxo, skb, rxcp);
1605
1606 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1607 skb->ip_summed = CHECKSUM_UNNECESSARY;
1608 else
1609 skb_checksum_none_assert(skb);
1610
1611 skb->protocol = eth_type_trans(skb, netdev);
1612 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1613 if (netdev->features & NETIF_F_RXHASH)
1614 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1615 skb_mark_napi_id(skb, napi);
1616
1617 if (rxcp->vlanf)
1618 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1619
1620 netif_receive_skb(skb);
1621 }
1622
1623 /* Process the RX completion indicated by rxcp when GRO is enabled */
1624 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1625 struct napi_struct *napi,
1626 struct be_rx_compl_info *rxcp)
1627 {
1628 struct be_adapter *adapter = rxo->adapter;
1629 struct be_rx_page_info *page_info;
1630 struct sk_buff *skb = NULL;
1631 u16 remaining, curr_frag_len;
1632 u16 i, j;
1633
1634 skb = napi_get_frags(napi);
1635 if (!skb) {
1636 be_rx_compl_discard(rxo, rxcp);
1637 return;
1638 }
1639
1640 remaining = rxcp->pkt_size;
1641 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1642 page_info = get_rx_page_info(rxo);
1643
1644 curr_frag_len = min(remaining, rx_frag_size);
1645
1646 /* Coalesce all frags from the same physical page in one slot */
1647 if (i == 0 || page_info->page_offset == 0) {
1648 /* First frag or Fresh page */
1649 j++;
1650 skb_frag_set_page(skb, j, page_info->page);
1651 skb_shinfo(skb)->frags[j].page_offset =
1652 page_info->page_offset;
1653 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1654 } else {
1655 put_page(page_info->page);
1656 }
1657 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1658 skb->truesize += rx_frag_size;
1659 remaining -= curr_frag_len;
1660 memset(page_info, 0, sizeof(*page_info));
1661 }
1662 BUG_ON(j > MAX_SKB_FRAGS);
1663
1664 skb_shinfo(skb)->nr_frags = j + 1;
1665 skb->len = rxcp->pkt_size;
1666 skb->data_len = rxcp->pkt_size;
1667 skb->ip_summed = CHECKSUM_UNNECESSARY;
1668 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1669 if (adapter->netdev->features & NETIF_F_RXHASH)
1670 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1671 skb_mark_napi_id(skb, napi);
1672
1673 if (rxcp->vlanf)
1674 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1675
1676 napi_gro_frags(napi);
1677 }
1678
1679 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1680 struct be_rx_compl_info *rxcp)
1681 {
1682 rxcp->pkt_size =
1683 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1684 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1685 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1686 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1687 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1688 rxcp->ip_csum =
1689 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1690 rxcp->l4_csum =
1691 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1692 rxcp->ipv6 =
1693 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1694 rxcp->num_rcvd =
1695 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1696 rxcp->pkt_type =
1697 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1698 rxcp->rss_hash =
1699 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1700 if (rxcp->vlanf) {
1701 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1702 compl);
1703 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1704 compl);
1705 }
1706 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1707 }
1708
1709 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1710 struct be_rx_compl_info *rxcp)
1711 {
1712 rxcp->pkt_size =
1713 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1714 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1715 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1716 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1717 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1718 rxcp->ip_csum =
1719 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1720 rxcp->l4_csum =
1721 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1722 rxcp->ipv6 =
1723 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1724 rxcp->num_rcvd =
1725 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1726 rxcp->pkt_type =
1727 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1728 rxcp->rss_hash =
1729 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1730 if (rxcp->vlanf) {
1731 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1732 compl);
1733 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1734 compl);
1735 }
1736 rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1737 rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1738 ip_frag, compl);
1739 }
1740
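/* Return the next valid Rx completion, parsed into rxo->rxcp, or NULL if
* none is pending; the valid bit is cleared so the entry isn't re-processed.
*/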
1741 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1742 {
1743 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1744 struct be_rx_compl_info *rxcp = &rxo->rxcp;
1745 struct be_adapter *adapter = rxo->adapter;
1746
1747 /* For checking the valid bit it is Ok to use either definition as the
1748 * valid bit is at the same position in both v0 and v1 Rx compl */
1749 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1750 return NULL;
1751
1752 rmb();
1753 be_dws_le_to_cpu(compl, sizeof(*compl));
1754
1755 if (adapter->be3_native)
1756 be_parse_rx_compl_v1(compl, rxcp);
1757 else
1758 be_parse_rx_compl_v0(compl, rxcp);
1759
1760 if (rxcp->ip_frag)
1761 rxcp->l4_csum = 0;
1762
1763 if (rxcp->vlanf) {
1764 /* In QNQ modes, if qnq bit is not set, then the packet was
1765 * tagged only with the transparent outer vlan-tag and must
1766 * not be treated as a vlan packet by host
1767 */
1768 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1769 rxcp->vlanf = 0;
1770
1771 if (!lancer_chip(adapter))
1772 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1773
1774 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1775 !adapter->vlan_tag[rxcp->vlan_tag])
1776 rxcp->vlanf = 0;
1777 }
1778
1779 /* As the compl has been parsed, reset it; we won't touch it again */
1780 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1781
1782 queue_tail_inc(&rxo->cq);
1783 return rxcp;
1784 }
1785
1786 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1787 {
1788 u32 order = get_order(size);
1789
1790 if (order > 0)
1791 gfp |= __GFP_COMP;
1792 return alloc_pages(gfp, order);
1793 }
1794
1795 /*
1796 * Allocate a page, split it into fragments of size rx_frag_size and post as
1797 * receive buffers to BE
1798 */
1799 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1800 {
1801 struct be_adapter *adapter = rxo->adapter;
1802 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1803 struct be_queue_info *rxq = &rxo->q;
1804 struct page *pagep = NULL;
1805 struct device *dev = &adapter->pdev->dev;
1806 struct be_eth_rx_d *rxd;
1807 u64 page_dmaaddr = 0, frag_dmaaddr;
1808 u32 posted, page_offset = 0;
1809
1810 page_info = &rxo->page_info_tbl[rxq->head];
1811 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1812 if (!pagep) {
1813 pagep = be_alloc_pages(adapter->big_page_size, gfp);
1814 if (unlikely(!pagep)) {
1815 rx_stats(rxo)->rx_post_fail++;
1816 break;
1817 }
1818 page_dmaaddr = dma_map_page(dev, pagep, 0,
1819 adapter->big_page_size,
1820 DMA_FROM_DEVICE);
1821 if (dma_mapping_error(dev, page_dmaaddr)) {
1822 put_page(pagep);
1823 pagep = NULL;
1824 rx_stats(rxo)->rx_post_fail++;
1825 break;
1826 }
1827 page_offset = 0;
1828 } else {
1829 get_page(pagep);
1830 page_offset += rx_frag_size;
1831 }
1832 page_info->page_offset = page_offset;
1833 page_info->page = pagep;
1834
1835 rxd = queue_head_node(rxq);
1836 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1837 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1838 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1839
1840 /* Any space left in the current big page for another frag? */
1841 if ((page_offset + rx_frag_size + rx_frag_size) >
1842 adapter->big_page_size) {
1843 pagep = NULL;
1844 page_info->last_frag = true;
1845 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1846 } else {
1847 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1848 }
1849
1850 prev_page_info = page_info;
1851 queue_head_inc(rxq);
1852 page_info = &rxo->page_info_tbl[rxq->head];
1853 }
1854
1855 /* Mark the last frag of a page when we break out of the above loop
1856 * with no more slots available in the RXQ
1857 */
1858 if (pagep) {
1859 prev_page_info->last_frag = true;
1860 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1861 }
1862
1863 if (posted) {
1864 atomic_add(posted, &rxq->used);
1865 if (rxo->rx_post_starved)
1866 rxo->rx_post_starved = false;
1867 be_rxq_notify(adapter, rxq->id, posted);
1868 } else if (atomic_read(&rxq->used) == 0) {
1869 /* Let be_worker replenish when memory is available */
1870 rxo->rx_post_starved = true;
1871 }
1872 }
1873
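/* Fetch the next valid TX completion from the TX CQ, convert it to CPU
 * endianness and clear its valid bit; returns NULL when the CQ is empty.
 */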
1874 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1875 {
1876 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1877
1878 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1879 return NULL;
1880
1881 rmb();
1882 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1883
1884 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1885
1886 queue_tail_inc(tx_cq);
1887 return txcp;
1888 }
1889
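/* Unmap the header and fragment WRBs of the completed TX request, free
 * its skb and return the total number of WRBs consumed.
 */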
1890 static u16 be_tx_compl_process(struct be_adapter *adapter,
1891 struct be_tx_obj *txo, u16 last_index)
1892 {
1893 struct be_queue_info *txq = &txo->q;
1894 struct be_eth_wrb *wrb;
1895 struct sk_buff **sent_skbs = txo->sent_skb_list;
1896 struct sk_buff *sent_skb;
1897 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1898 bool unmap_skb_hdr = true;
1899
1900 sent_skb = sent_skbs[txq->tail];
1901 BUG_ON(!sent_skb);
1902 sent_skbs[txq->tail] = NULL;
1903
1904 /* skip header wrb */
1905 queue_tail_inc(txq);
1906
1907 do {
1908 cur_index = txq->tail;
1909 wrb = queue_tail_node(txq);
1910 unmap_tx_frag(&adapter->pdev->dev, wrb,
1911 (unmap_skb_hdr && skb_headlen(sent_skb)));
1912 unmap_skb_hdr = false;
1913
1914 num_wrbs++;
1915 queue_tail_inc(txq);
1916 } while (cur_index != last_index);
1917
1918 dev_kfree_skb_any(sent_skb);
1919 return num_wrbs;
1920 }
1921
1922 /* Return the number of events in the event queue */
1923 static inline int events_get(struct be_eq_obj *eqo)
1924 {
1925 struct be_eq_entry *eqe;
1926 int num = 0;
1927
1928 do {
1929 eqe = queue_tail_node(&eqo->q);
1930 if (eqe->evt == 0)
1931 break;
1932
1933 rmb();
1934 eqe->evt = 0;
1935 num++;
1936 queue_tail_inc(&eqo->q);
1937 } while (true);
1938
1939 return num;
1940 }
1941
1942 /* Leaves the EQ in disarmed state */
1943 static void be_eq_clean(struct be_eq_obj *eqo)
1944 {
1945 int num = events_get(eqo);
1946
1947 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1948 }
1949
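/* Drain all pending RX completions (waiting for the flush completion on
 * non-Lancer chips), then free any RX buffers still posted to the RXQ.
 */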
1950 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1951 {
1952 struct be_rx_page_info *page_info;
1953 struct be_queue_info *rxq = &rxo->q;
1954 struct be_queue_info *rx_cq = &rxo->cq;
1955 struct be_rx_compl_info *rxcp;
1956 struct be_adapter *adapter = rxo->adapter;
1957 int flush_wait = 0;
1958
1959 /* Consume pending rx completions.
1960 * Wait for the flush completion (identified by zero num_rcvd)
1961 * to arrive. Notify CQ even when there are no more CQ entries
1962 * for HW to flush partially coalesced CQ entries.
1963 * In Lancer, there is no need to wait for flush compl.
1964 */
1965 for (;;) {
1966 rxcp = be_rx_compl_get(rxo);
1967 if (rxcp == NULL) {
1968 if (lancer_chip(adapter))
1969 break;
1970
1971 if (flush_wait++ > 10 || be_hw_error(adapter)) {
1972 dev_warn(&adapter->pdev->dev,
1973 "did not receive flush compl\n");
1974 break;
1975 }
1976 be_cq_notify(adapter, rx_cq->id, true, 0);
1977 mdelay(1);
1978 } else {
1979 be_rx_compl_discard(rxo, rxcp);
1980 be_cq_notify(adapter, rx_cq->id, false, 1);
1981 if (rxcp->num_rcvd == 0)
1982 break;
1983 }
1984 }
1985
1986 /* After cleanup, leave the CQ in unarmed state */
1987 be_cq_notify(adapter, rx_cq->id, false, 0);
1988
1989 /* Then free posted rx buffers that were not used */
1990 while (atomic_read(&rxq->used) > 0) {
1991 page_info = get_rx_page_info(rxo);
1992 put_page(page_info->page);
1993 memset(page_info, 0, sizeof(*page_info));
1994 }
1995 BUG_ON(atomic_read(&rxq->used));
1996 rxq->tail = rxq->head = 0;
1997 }
1998
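/* Wait up to 200ms for all pending TX completions and process them; any
 * posted TX requests whose completions never arrive are then freed.
 */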
1999 static void be_tx_compl_clean(struct be_adapter *adapter)
2000 {
2001 struct be_tx_obj *txo;
2002 struct be_queue_info *txq;
2003 struct be_eth_tx_compl *txcp;
2004 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2005 struct sk_buff *sent_skb;
2006 bool dummy_wrb;
2007 int i, pending_txqs;
2008
2009 /* Wait for a max of 200ms for all the tx-completions to arrive. */
2010 do {
2011 pending_txqs = adapter->num_tx_qs;
2012
2013 for_all_tx_queues(adapter, txo, i) {
2014 txq = &txo->q;
2015 while ((txcp = be_tx_compl_get(&txo->cq))) {
2016 end_idx =
2017 AMAP_GET_BITS(struct amap_eth_tx_compl,
2018 wrb_index, txcp);
2019 num_wrbs += be_tx_compl_process(adapter, txo,
2020 end_idx);
2021 cmpl++;
2022 }
2023 if (cmpl) {
2024 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2025 atomic_sub(num_wrbs, &txq->used);
2026 cmpl = 0;
2027 num_wrbs = 0;
2028 }
2029 if (atomic_read(&txq->used) == 0)
2030 pending_txqs--;
2031 }
2032
2033 if (pending_txqs == 0 || ++timeo > 200)
2034 break;
2035
2036 mdelay(1);
2037 } while (true);
2038
2039 for_all_tx_queues(adapter, txo, i) {
2040 txq = &txo->q;
2041 if (atomic_read(&txq->used))
2042 dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2043 atomic_read(&txq->used));
2044
2045 /* free posted tx for which compls will never arrive */
2046 while (atomic_read(&txq->used)) {
2047 sent_skb = txo->sent_skb_list[txq->tail];
2048 end_idx = txq->tail;
2049 num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2050 &dummy_wrb);
2051 index_adv(&end_idx, num_wrbs - 1, txq->len);
2052 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2053 atomic_sub(num_wrbs, &txq->used);
2054 }
2055 }
2056 }
2057
2058 static void be_evt_queues_destroy(struct be_adapter *adapter)
2059 {
2060 struct be_eq_obj *eqo;
2061 int i;
2062
2063 for_all_evt_queues(adapter, eqo, i) {
2064 if (eqo->q.created) {
2065 be_eq_clean(eqo);
2066 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2067 napi_hash_del(&eqo->napi);
2068 netif_napi_del(&eqo->napi);
2069 }
2070 be_queue_free(adapter, &eqo->q);
2071 }
2072 }
2073
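/* Allocate and create the event queues (one per vector), register their
 * NAPI instances and set up default interrupt-coalescing (AIC) parameters.
 */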
2074 static int be_evt_queues_create(struct be_adapter *adapter)
2075 {
2076 struct be_queue_info *eq;
2077 struct be_eq_obj *eqo;
2078 struct be_aic_obj *aic;
2079 int i, rc;
2080
2081 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2082 adapter->cfg_num_qs);
2083
2084 for_all_evt_queues(adapter, eqo, i) {
2085 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2086 BE_NAPI_WEIGHT);
2087 napi_hash_add(&eqo->napi);
2088 aic = &adapter->aic_obj[i];
2089 eqo->adapter = adapter;
2090 eqo->tx_budget = BE_TX_BUDGET;
2091 eqo->idx = i;
2092 aic->max_eqd = BE_MAX_EQD;
2093 aic->enable = true;
2094
2095 eq = &eqo->q;
2096 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2097 sizeof(struct be_eq_entry));
2098 if (rc)
2099 return rc;
2100
2101 rc = be_cmd_eq_create(adapter, eqo);
2102 if (rc)
2103 return rc;
2104 }
2105 return 0;
2106 }
2107
2108 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2109 {
2110 struct be_queue_info *q;
2111
2112 q = &adapter->mcc_obj.q;
2113 if (q->created)
2114 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2115 be_queue_free(adapter, q);
2116
2117 q = &adapter->mcc_obj.cq;
2118 if (q->created)
2119 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2120 be_queue_free(adapter, q);
2121 }
2122
2123 /* Must be called only after TX qs are created as MCC shares TX EQ */
2124 static int be_mcc_queues_create(struct be_adapter *adapter)
2125 {
2126 struct be_queue_info *q, *cq;
2127
2128 cq = &adapter->mcc_obj.cq;
2129 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2130 sizeof(struct be_mcc_compl)))
2131 goto err;
2132
2133 /* Use the default EQ for MCC completions */
2134 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2135 goto mcc_cq_free;
2136
2137 q = &adapter->mcc_obj.q;
2138 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2139 goto mcc_cq_destroy;
2140
2141 if (be_cmd_mccq_create(adapter, q, cq))
2142 goto mcc_q_free;
2143
2144 return 0;
2145
2146 mcc_q_free:
2147 be_queue_free(adapter, q);
2148 mcc_cq_destroy:
2149 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2150 mcc_cq_free:
2151 be_queue_free(adapter, cq);
2152 err:
2153 return -1;
2154 }
2155
2156 static void be_tx_queues_destroy(struct be_adapter *adapter)
2157 {
2158 struct be_queue_info *q;
2159 struct be_tx_obj *txo;
2160 u8 i;
2161
2162 for_all_tx_queues(adapter, txo, i) {
2163 q = &txo->q;
2164 if (q->created)
2165 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2166 be_queue_free(adapter, q);
2167
2168 q = &txo->cq;
2169 if (q->created)
2170 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2171 be_queue_free(adapter, q);
2172 }
2173 }
2174
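/* Create a TX CQ and TXQ per TX ring; when there are fewer EQs than
 * TXQs, several TXQs share one EQ.
 */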
2175 static int be_tx_qs_create(struct be_adapter *adapter)
2176 {
2177 struct be_queue_info *cq, *eq;
2178 struct be_tx_obj *txo;
2179 int status, i;
2180
2181 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2182
2183 for_all_tx_queues(adapter, txo, i) {
2184 cq = &txo->cq;
2185 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2186 sizeof(struct be_eth_tx_compl));
2187 if (status)
2188 return status;
2189
2190 u64_stats_init(&txo->stats.sync);
2191 u64_stats_init(&txo->stats.sync_compl);
2192
2193 /* If num_evt_qs is less than num_tx_qs, then more than
2194 * one txq shares an eq
2195 */
2196 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2197 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2198 if (status)
2199 return status;
2200
2201 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2202 sizeof(struct be_eth_wrb));
2203 if (status)
2204 return status;
2205
2206 status = be_cmd_txq_create(adapter, txo);
2207 if (status)
2208 return status;
2209 }
2210
2211 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2212 adapter->num_tx_qs);
2213 return 0;
2214 }
2215
2216 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2217 {
2218 struct be_queue_info *q;
2219 struct be_rx_obj *rxo;
2220 int i;
2221
2222 for_all_rx_queues(adapter, rxo, i) {
2223 q = &rxo->cq;
2224 if (q->created)
2225 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2226 be_queue_free(adapter, q);
2227 }
2228 }
2229
2230 static int be_rx_cqs_create(struct be_adapter *adapter)
2231 {
2232 struct be_queue_info *eq, *cq;
2233 struct be_rx_obj *rxo;
2234 int rc, i;
2235
2236 /* We can create as many RSS rings as there are EQs. */
2237 adapter->num_rx_qs = adapter->num_evt_qs;
2238
2239 /* We'll use RSS only if at least 2 RSS rings are supported.
2240 * When RSS is used, we'll need a default RXQ for non-IP traffic.
2241 */
2242 if (adapter->num_rx_qs > 1)
2243 adapter->num_rx_qs++;
2244
2245 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2246 for_all_rx_queues(adapter, rxo, i) {
2247 rxo->adapter = adapter;
2248 cq = &rxo->cq;
2249 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2250 sizeof(struct be_eth_rx_compl));
2251 if (rc)
2252 return rc;
2253
2254 u64_stats_init(&rxo->stats.sync);
2255 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2256 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2257 if (rc)
2258 return rc;
2259 }
2260
2261 dev_info(&adapter->pdev->dev,
2262 "created %d RSS queue(s) and 1 default RX queue\n",
2263 adapter->num_rx_qs - 1);
2264 return 0;
2265 }
2266
2267 static irqreturn_t be_intx(int irq, void *dev)
2268 {
2269 struct be_eq_obj *eqo = dev;
2270 struct be_adapter *adapter = eqo->adapter;
2271 int num_evts = 0;
2272
2273 /* IRQ is not expected when NAPI is scheduled as the EQ
2274 * will not be armed.
2275 * But, this can happen on Lancer INTx where it takes
2276 * a while to de-assert INTx or in BE2 where occasionally
2277 * an interrupt may be raised even when EQ is unarmed.
2278 * If NAPI is already scheduled, then counting & notifying
2279 * events will orphan them.
2280 */
2281 if (napi_schedule_prep(&eqo->napi)) {
2282 num_evts = events_get(eqo);
2283 __napi_schedule(&eqo->napi);
2284 if (num_evts)
2285 eqo->spurious_intr = 0;
2286 }
2287 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2288
2289 /* Return IRQ_HANDLED only for the first spurious intr
2290 * after a valid intr to stop the kernel from branding
2291 * this irq as a bad one!
2292 */
2293 if (num_evts || eqo->spurious_intr++ == 0)
2294 return IRQ_HANDLED;
2295 else
2296 return IRQ_NONE;
2297 }
2298
2299 static irqreturn_t be_msix(int irq, void *dev)
2300 {
2301 struct be_eq_obj *eqo = dev;
2302
2303 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2304 napi_schedule(&eqo->napi);
2305 return IRQ_HANDLED;
2306 }
2307
2308 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2309 {
2310 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2311 }
2312
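/* Process up to 'budget' RX completions: flush and zero-length
 * completions are dropped, as are pkts from another port on BE; valid
 * pkts go to GRO or the regular RX path and the RXQ is refilled if low.
 */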
2313 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2314 int budget, int polling)
2315 {
2316 struct be_adapter *adapter = rxo->adapter;
2317 struct be_queue_info *rx_cq = &rxo->cq;
2318 struct be_rx_compl_info *rxcp;
2319 u32 work_done;
2320
2321 for (work_done = 0; work_done < budget; work_done++) {
2322 rxcp = be_rx_compl_get(rxo);
2323 if (!rxcp)
2324 break;
2325
2326 /* Is it a flush compl that has no data */
2327 if (unlikely(rxcp->num_rcvd == 0))
2328 goto loop_continue;
2329
2330 /* Discard compl with partial DMA Lancer B0 */
2331 if (unlikely(!rxcp->pkt_size)) {
2332 be_rx_compl_discard(rxo, rxcp);
2333 goto loop_continue;
2334 }
2335
2336 /* On BE, drop pkts that arrive due to imperfect filtering in
2337 * promiscuous mode on some SKUs
2338 */
2339 if (unlikely(rxcp->port != adapter->port_num &&
2340 !lancer_chip(adapter))) {
2341 be_rx_compl_discard(rxo, rxcp);
2342 goto loop_continue;
2343 }
2344
2345 /* Don't do gro when we're busy_polling */
2346 if (do_gro(rxcp) && polling != BUSY_POLLING)
2347 be_rx_compl_process_gro(rxo, napi, rxcp);
2348 else
2349 be_rx_compl_process(rxo, napi, rxcp);
2350
2351 loop_continue:
2352 be_rx_stats_update(rxo, rxcp);
2353 }
2354
2355 if (work_done) {
2356 be_cq_notify(adapter, rx_cq->id, true, work_done);
2357
2358 /* When an rx-obj gets into post_starved state, just
2359 * let be_worker do the posting.
2360 */
2361 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2362 !rxo->rx_post_starved)
2363 be_post_rx_frags(rxo, GFP_ATOMIC);
2364 }
2365
2366 return work_done;
2367 }
2368
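/* Reap up to 'budget' TX completions, free the corresponding WRBs/skbs
 * and wake the netdev queue if it was stopped for lack of WRBs.
 */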
2369 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2370 int budget, int idx)
2371 {
2372 struct be_eth_tx_compl *txcp;
2373 int num_wrbs = 0, work_done;
2374
2375 for (work_done = 0; work_done < budget; work_done++) {
2376 txcp = be_tx_compl_get(&txo->cq);
2377 if (!txcp)
2378 break;
2379 num_wrbs += be_tx_compl_process(adapter, txo,
2380 AMAP_GET_BITS(struct amap_eth_tx_compl,
2381 wrb_index, txcp));
2382 }
2383
2384 if (work_done) {
2385 be_cq_notify(adapter, txo->cq.id, true, work_done);
2386 atomic_sub(num_wrbs, &txo->q.used);
2387
2388 /* As Tx wrbs have been freed up, wake up netdev queue
2389 * if it was stopped due to lack of tx wrbs. */
2390 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2391 atomic_read(&txo->q.used) < txo->q.len / 2) {
2392 netif_wake_subqueue(adapter->netdev, idx);
2393 }
2394
2395 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2396 tx_stats(txo)->tx_compl += work_done;
2397 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2398 }
2399 return (work_done < budget); /* Done */
2400 }
2401
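/* NAPI poll handler: services the TXQs and RXQs mapped to this EQ, runs
 * MCC processing on the MCC EQ, and re-arms the EQ when work is done.
 */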
2402 int be_poll(struct napi_struct *napi, int budget)
2403 {
2404 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2405 struct be_adapter *adapter = eqo->adapter;
2406 int max_work = 0, work, i, num_evts;
2407 struct be_rx_obj *rxo;
2408 bool tx_done;
2409
2410 num_evts = events_get(eqo);
2411
2412 /* Process all TXQs serviced by this EQ */
2413 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2414 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2415 eqo->tx_budget, i);
2416 if (!tx_done)
2417 max_work = budget;
2418 }
2419
2420 if (be_lock_napi(eqo)) {
2421 /* This loop will iterate twice for EQ0 in which
2422 * completions of the last RXQ (default one) are also processed.
2423 * For other EQs the loop iterates only once
2424 */
2425 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2426 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2427 max_work = max(work, max_work);
2428 }
2429 be_unlock_napi(eqo);
2430 } else {
2431 max_work = budget;
2432 }
2433
2434 if (is_mcc_eqo(eqo))
2435 be_process_mcc(adapter);
2436
2437 if (max_work < budget) {
2438 napi_complete(napi);
2439 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2440 } else {
2441 /* As we'll continue in polling mode, count and clear events */
2442 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2443 }
2444 return max_work;
2445 }
2446
2447 #ifdef CONFIG_NET_RX_BUSY_POLL
2448 static int be_busy_poll(struct napi_struct *napi)
2449 {
2450 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2451 struct be_adapter *adapter = eqo->adapter;
2452 struct be_rx_obj *rxo;
2453 int i, work = 0;
2454
2455 if (!be_lock_busy_poll(eqo))
2456 return LL_FLUSH_BUSY;
2457
2458 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2459 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2460 if (work)
2461 break;
2462 }
2463
2464 be_unlock_busy_poll(eqo);
2465 return work;
2466 }
2467 #endif
2468
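/* Check for adapter errors: SLIPORT status on Lancer, UE status registers
 * on BE/Skyhawk; log the failure and take the link down if one is found.
 */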
2469 void be_detect_error(struct be_adapter *adapter)
2470 {
2471 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2472 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2473 u32 i;
2474 bool error_detected = false;
2475 struct device *dev = &adapter->pdev->dev;
2476 struct net_device *netdev = adapter->netdev;
2477
2478 if (be_hw_error(adapter))
2479 return;
2480
2481 if (lancer_chip(adapter)) {
2482 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2483 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2484 sliport_err1 = ioread32(adapter->db +
2485 SLIPORT_ERROR1_OFFSET);
2486 sliport_err2 = ioread32(adapter->db +
2487 SLIPORT_ERROR2_OFFSET);
2488 adapter->hw_error = true;
2489 /* Do not log error messages if it's a FW reset */
2490 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2491 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2492 dev_info(dev, "Firmware update in progress\n");
2493 } else {
2494 error_detected = true;
2495 dev_err(dev, "Error detected in the card\n");
2496 dev_err(dev, "ERR: sliport status 0x%x\n",
2497 sliport_status);
2498 dev_err(dev, "ERR: sliport error1 0x%x\n",
2499 sliport_err1);
2500 dev_err(dev, "ERR: sliport error2 0x%x\n",
2501 sliport_err2);
2502 }
2503 }
2504 } else {
2505 pci_read_config_dword(adapter->pdev,
2506 PCICFG_UE_STATUS_LOW, &ue_lo);
2507 pci_read_config_dword(adapter->pdev,
2508 PCICFG_UE_STATUS_HIGH, &ue_hi);
2509 pci_read_config_dword(adapter->pdev,
2510 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2511 pci_read_config_dword(adapter->pdev,
2512 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2513
2514 ue_lo = (ue_lo & ~ue_lo_mask);
2515 ue_hi = (ue_hi & ~ue_hi_mask);
2516
2517 /* On certain platforms BE hardware can indicate spurious UEs.
2518 * Allow the HW to stop working completely in case of a real UE,
2519 * and hence do not set hw_error on UE detection.
2520 */
2521
2522 if (ue_lo || ue_hi) {
2523 error_detected = true;
2524 dev_err(dev,
2525 "Unrecoverable Error detected in the adapter");
2526 dev_err(dev, "Please reboot server to recover");
2527 if (skyhawk_chip(adapter))
2528 adapter->hw_error = true;
2529 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2530 if (ue_lo & 1)
2531 dev_err(dev, "UE: %s bit set\n",
2532 ue_status_low_desc[i]);
2533 }
2534 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2535 if (ue_hi & 1)
2536 dev_err(dev, "UE: %s bit set\n",
2537 ue_status_hi_desc[i]);
2538 }
2539 }
2540 }
2541 if (error_detected)
2542 netif_carrier_off(netdev);
2543 }
2544
2545 static void be_msix_disable(struct be_adapter *adapter)
2546 {
2547 if (msix_enabled(adapter)) {
2548 pci_disable_msix(adapter->pdev);
2549 adapter->num_msix_vec = 0;
2550 adapter->num_msix_roce_vec = 0;
2551 }
2552 }
2553
2554 static int be_msix_enable(struct be_adapter *adapter)
2555 {
2556 int i, num_vec;
2557 struct device *dev = &adapter->pdev->dev;
2558
2559 /* If RoCE is supported, program the max number of NIC vectors that
2560 * may be configured via set-channels, along with vectors needed for
2561 * RoCE. Else, just program the number we'll use initially.
2562 */
2563 if (be_roce_supported(adapter))
2564 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2565 2 * num_online_cpus());
2566 else
2567 num_vec = adapter->cfg_num_qs;
2568
2569 for (i = 0; i < num_vec; i++)
2570 adapter->msix_entries[i].entry = i;
2571
2572 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2573 MIN_MSIX_VECTORS, num_vec);
2574 if (num_vec < 0)
2575 goto fail;
2576
2577 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2578 adapter->num_msix_roce_vec = num_vec / 2;
2579 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2580 adapter->num_msix_roce_vec);
2581 }
2582
2583 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2584
2585 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2586 adapter->num_msix_vec);
2587 return 0;
2588
2589 fail:
2590 dev_warn(dev, "MSIx enable failed\n");
2591
2592 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2593 if (!be_physfn(adapter))
2594 return num_vec;
2595 return 0;
2596 }
2597
2598 static inline int be_msix_vec_get(struct be_adapter *adapter,
2599 struct be_eq_obj *eqo)
2600 {
2601 return adapter->msix_entries[eqo->msix_idx].vector;
2602 }
2603
2604 static int be_msix_register(struct be_adapter *adapter)
2605 {
2606 struct net_device *netdev = adapter->netdev;
2607 struct be_eq_obj *eqo;
2608 int status, i, vec;
2609
2610 for_all_evt_queues(adapter, eqo, i) {
2611 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2612 vec = be_msix_vec_get(adapter, eqo);
2613 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2614 if (status)
2615 goto err_msix;
2616 }
2617
2618 return 0;
2619 err_msix:
2620 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2621 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2622 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2623 status);
2624 be_msix_disable(adapter);
2625 return status;
2626 }
2627
2628 static int be_irq_register(struct be_adapter *adapter)
2629 {
2630 struct net_device *netdev = adapter->netdev;
2631 int status;
2632
2633 if (msix_enabled(adapter)) {
2634 status = be_msix_register(adapter);
2635 if (status == 0)
2636 goto done;
2637 /* INTx is not supported for VF */
2638 if (!be_physfn(adapter))
2639 return status;
2640 }
2641
2642 /* INTx: only the first EQ is used */
2643 netdev->irq = adapter->pdev->irq;
2644 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2645 &adapter->eq_obj[0]);
2646 if (status) {
2647 dev_err(&adapter->pdev->dev,
2648 "INTx request IRQ failed - err %d\n", status);
2649 return status;
2650 }
2651 done:
2652 adapter->isr_registered = true;
2653 return 0;
2654 }
2655
2656 static void be_irq_unregister(struct be_adapter *adapter)
2657 {
2658 struct net_device *netdev = adapter->netdev;
2659 struct be_eq_obj *eqo;
2660 int i;
2661
2662 if (!adapter->isr_registered)
2663 return;
2664
2665 /* INTx */
2666 if (!msix_enabled(adapter)) {
2667 free_irq(netdev->irq, &adapter->eq_obj[0]);
2668 goto done;
2669 }
2670
2671 /* MSIx */
2672 for_all_evt_queues(adapter, eqo, i)
2673 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2674
2675 done:
2676 adapter->isr_registered = false;
2677 }
2678
2679 static void be_rx_qs_destroy(struct be_adapter *adapter)
2680 {
2681 struct be_queue_info *q;
2682 struct be_rx_obj *rxo;
2683 int i;
2684
2685 for_all_rx_queues(adapter, rxo, i) {
2686 q = &rxo->q;
2687 if (q->created) {
2688 be_cmd_rxq_destroy(adapter, q);
2689 be_rx_cq_clean(rxo);
2690 }
2691 be_queue_free(adapter, q);
2692 }
2693 }
2694
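/* ndo_stop handler: quiesce NAPI and busy-poll, drain TX completions,
 * destroy RX queues, remove extra unicast MACs and unregister IRQs.
 */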
2695 static int be_close(struct net_device *netdev)
2696 {
2697 struct be_adapter *adapter = netdev_priv(netdev);
2698 struct be_eq_obj *eqo;
2699 int i;
2700
2701 be_roce_dev_close(adapter);
2702
2703 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2704 for_all_evt_queues(adapter, eqo, i) {
2705 napi_disable(&eqo->napi);
2706 be_disable_busy_poll(eqo);
2707 }
2708 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2709 }
2710
2711 be_async_mcc_disable(adapter);
2712
2713 /* Wait for all pending tx completions to arrive so that
2714 * all tx skbs are freed.
2715 */
2716 netif_tx_disable(netdev);
2717 be_tx_compl_clean(adapter);
2718
2719 be_rx_qs_destroy(adapter);
2720
2721 for (i = 1; i < (adapter->uc_macs + 1); i++)
2722 be_cmd_pmac_del(adapter, adapter->if_handle,
2723 adapter->pmac_id[i], 0);
2724 adapter->uc_macs = 0;
2725
2726 for_all_evt_queues(adapter, eqo, i) {
2727 if (msix_enabled(adapter))
2728 synchronize_irq(be_msix_vec_get(adapter, eqo));
2729 else
2730 synchronize_irq(netdev->irq);
2731 be_eq_clean(eqo);
2732 }
2733
2734 be_irq_unregister(adapter);
2735
2736 return 0;
2737 }
2738
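/* Create the RX queues: the default (non-RSS) RXQ first, then the RSS
 * RXQs, program the 128-entry RSS indirection table and post the initial
 * receive buffers.
 */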
2739 static int be_rx_qs_create(struct be_adapter *adapter)
2740 {
2741 struct be_rx_obj *rxo;
2742 int rc, i, j;
2743 u8 rsstable[128];
2744
2745 for_all_rx_queues(adapter, rxo, i) {
2746 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2747 sizeof(struct be_eth_rx_d));
2748 if (rc)
2749 return rc;
2750 }
2751
2752 /* The FW would like the default RXQ to be created first */
2753 rxo = default_rxo(adapter);
2754 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2755 adapter->if_handle, false, &rxo->rss_id);
2756 if (rc)
2757 return rc;
2758
2759 for_all_rss_queues(adapter, rxo, i) {
2760 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2761 rx_frag_size, adapter->if_handle,
2762 true, &rxo->rss_id);
2763 if (rc)
2764 return rc;
2765 }
2766
2767 if (be_multi_rxq(adapter)) {
2768 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2769 for_all_rss_queues(adapter, rxo, i) {
2770 if ((j + i) >= 128)
2771 break;
2772 rsstable[j + i] = rxo->rss_id;
2773 }
2774 }
2775 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2776 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2777
2778 if (!BEx_chip(adapter))
2779 adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2780 RSS_ENABLE_UDP_IPV6;
2781 } else {
2782 /* Disable RSS if only the default RX Q is created */
2783 adapter->rss_flags = RSS_ENABLE_NONE;
2784 }
2785
2786 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2787 128);
2788 if (rc) {
2789 adapter->rss_flags = RSS_ENABLE_NONE;
2790 return rc;
2791 }
2792
2793 /* First time posting */
2794 for_all_rx_queues(adapter, rxo, i)
2795 be_post_rx_frags(rxo, GFP_KERNEL);
2796 return 0;
2797 }
2798
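/* ndo_open handler: create RX queues, register IRQs, arm the CQs/EQs,
 * enable NAPI and busy-poll, and report the current link status.
 */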
2799 static int be_open(struct net_device *netdev)
2800 {
2801 struct be_adapter *adapter = netdev_priv(netdev);
2802 struct be_eq_obj *eqo;
2803 struct be_rx_obj *rxo;
2804 struct be_tx_obj *txo;
2805 u8 link_status;
2806 int status, i;
2807
2808 status = be_rx_qs_create(adapter);
2809 if (status)
2810 goto err;
2811
2812 status = be_irq_register(adapter);
2813 if (status)
2814 goto err;
2815
2816 for_all_rx_queues(adapter, rxo, i)
2817 be_cq_notify(adapter, rxo->cq.id, true, 0);
2818
2819 for_all_tx_queues(adapter, txo, i)
2820 be_cq_notify(adapter, txo->cq.id, true, 0);
2821
2822 be_async_mcc_enable(adapter);
2823
2824 for_all_evt_queues(adapter, eqo, i) {
2825 napi_enable(&eqo->napi);
2826 be_enable_busy_poll(eqo);
2827 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2828 }
2829 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2830
2831 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2832 if (!status)
2833 be_link_status_update(adapter, link_status);
2834
2835 netif_tx_start_all_queues(netdev);
2836 be_roce_dev_open(adapter);
2837 return 0;
2838 err:
2839 be_close(adapter->netdev);
2840 return -EIO;
2841 }
2842
2843 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2844 {
2845 struct be_dma_mem cmd;
2846 int status = 0;
2847 u8 mac[ETH_ALEN];
2848
2849 memset(mac, 0, ETH_ALEN);
2850
2851 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2852 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2853 GFP_KERNEL);
2854 if (cmd.va == NULL)
2855 return -1;
2856
2857 if (enable) {
2858 status = pci_write_config_dword(adapter->pdev,
2859 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2860 if (status) {
2861 dev_err(&adapter->pdev->dev,
2862 "Could not enable Wake-on-lan\n");
2863 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2864 cmd.dma);
2865 return status;
2866 }
2867 status = be_cmd_enable_magic_wol(adapter,
2868 adapter->netdev->dev_addr, &cmd);
2869 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2870 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2871 } else {
2872 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2873 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2874 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2875 }
2876
2877 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2878 return status;
2879 }
2880
2881 /*
2882 * Generate a seed MAC address from the PF MAC Address using jhash.
2883 * MAC addresses for VFs are assigned incrementally starting from the seed.
2884 * These addresses are programmed in the ASIC by the PF and the VF driver
2885 * queries for the MAC address during its probe.
2886 */
2887 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2888 {
2889 u32 vf;
2890 int status = 0;
2891 u8 mac[ETH_ALEN];
2892 struct be_vf_cfg *vf_cfg;
2893
2894 be_vf_eth_addr_generate(adapter, mac);
2895
2896 for_all_vfs(adapter, vf_cfg, vf) {
2897 if (BEx_chip(adapter))
2898 status = be_cmd_pmac_add(adapter, mac,
2899 vf_cfg->if_handle,
2900 &vf_cfg->pmac_id, vf + 1);
2901 else
2902 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2903 vf + 1);
2904
2905 if (status)
2906 dev_err(&adapter->pdev->dev,
2907 "Mac address assignment failed for VF %d\n", vf);
2908 else
2909 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2910
2911 mac[5] += 1;
2912 }
2913 return status;
2914 }
2915
2916 static int be_vfs_mac_query(struct be_adapter *adapter)
2917 {
2918 int status, vf;
2919 u8 mac[ETH_ALEN];
2920 struct be_vf_cfg *vf_cfg;
2921
2922 for_all_vfs(adapter, vf_cfg, vf) {
2923 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2924 mac, vf_cfg->if_handle,
2925 false, vf+1);
2926 if (status)
2927 return status;
2928 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2929 }
2930 return 0;
2931 }
2932
2933 static void be_vf_clear(struct be_adapter *adapter)
2934 {
2935 struct be_vf_cfg *vf_cfg;
2936 u32 vf;
2937
2938 if (pci_vfs_assigned(adapter->pdev)) {
2939 dev_warn(&adapter->pdev->dev,
2940 "VFs are assigned to VMs: not disabling VFs\n");
2941 goto done;
2942 }
2943
2944 pci_disable_sriov(adapter->pdev);
2945
2946 for_all_vfs(adapter, vf_cfg, vf) {
2947 if (BEx_chip(adapter))
2948 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2949 vf_cfg->pmac_id, vf + 1);
2950 else
2951 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2952 vf + 1);
2953
2954 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2955 }
2956 done:
2957 kfree(adapter->vf_cfg);
2958 adapter->num_vfs = 0;
2959 }
2960
2961 static void be_clear_queues(struct be_adapter *adapter)
2962 {
2963 be_mcc_queues_destroy(adapter);
2964 be_rx_cqs_destroy(adapter);
2965 be_tx_queues_destroy(adapter);
2966 be_evt_queues_destroy(adapter);
2967 }
2968
2969 static void be_cancel_worker(struct be_adapter *adapter)
2970 {
2971 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2972 cancel_delayed_work_sync(&adapter->work);
2973 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2974 }
2975 }
2976
2977 static void be_mac_clear(struct be_adapter *adapter)
2978 {
2979 int i;
2980
2981 if (adapter->pmac_id) {
2982 for (i = 0; i < (adapter->uc_macs + 1); i++)
2983 be_cmd_pmac_del(adapter, adapter->if_handle,
2984 adapter->pmac_id[i], 0);
2985 adapter->uc_macs = 0;
2986
2987 kfree(adapter->pmac_id);
2988 adapter->pmac_id = NULL;
2989 }
2990 }
2991
2992 static int be_clear(struct be_adapter *adapter)
2993 {
2994 be_cancel_worker(adapter);
2995
2996 if (sriov_enabled(adapter))
2997 be_vf_clear(adapter);
2998
2999 /* delete the primary mac along with the uc-mac list */
3000 be_mac_clear(adapter);
3001
3002 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3003
3004 be_clear_queues(adapter);
3005
3006 be_msix_disable(adapter);
3007 return 0;
3008 }
3009
3010 static int be_vfs_if_create(struct be_adapter *adapter)
3011 {
3012 struct be_resources res = {0};
3013 struct be_vf_cfg *vf_cfg;
3014 u32 cap_flags, en_flags, vf;
3015 int status = 0;
3016
3017 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3018 BE_IF_FLAGS_MULTICAST;
3019
3020 for_all_vfs(adapter, vf_cfg, vf) {
3021 if (!BE3_chip(adapter)) {
3022 status = be_cmd_get_profile_config(adapter, &res,
3023 vf + 1);
3024 if (!status)
3025 cap_flags = res.if_cap_flags;
3026 }
3027
3028 /* If a FW profile exists, then cap_flags are updated */
3029 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3030 BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
3031 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3032 &vf_cfg->if_handle, vf + 1);
3033 if (status)
3034 goto err;
3035 }
3036 err:
3037 return status;
3038 }
3039
3040 static int be_vf_setup_init(struct be_adapter *adapter)
3041 {
3042 struct be_vf_cfg *vf_cfg;
3043 int vf;
3044
3045 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3046 GFP_KERNEL);
3047 if (!adapter->vf_cfg)
3048 return -ENOMEM;
3049
3050 for_all_vfs(adapter, vf_cfg, vf) {
3051 vf_cfg->if_handle = -1;
3052 vf_cfg->pmac_id = -1;
3053 }
3054 return 0;
3055 }
3056
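/* Enable SR-IOV: create/query per-VF interfaces and MAC addresses, grant
 * FILTMGMT privilege where possible and enable the VFs in FW and PCI.
 */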
3057 static int be_vf_setup(struct be_adapter *adapter)
3058 {
3059 struct device *dev = &adapter->pdev->dev;
3060 struct be_vf_cfg *vf_cfg;
3061 int status, old_vfs, vf;
3062 u32 privileges;
3063 u16 lnk_speed;
3064
3065 old_vfs = pci_num_vf(adapter->pdev);
3066 if (old_vfs) {
3067 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3068 if (old_vfs != num_vfs)
3069 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3070 adapter->num_vfs = old_vfs;
3071 } else {
3072 if (num_vfs > be_max_vfs(adapter))
3073 dev_info(dev, "Device supports %d VFs and not %d\n",
3074 be_max_vfs(adapter), num_vfs);
3075 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3076 if (!adapter->num_vfs)
3077 return 0;
3078 }
3079
3080 status = be_vf_setup_init(adapter);
3081 if (status)
3082 goto err;
3083
3084 if (old_vfs) {
3085 for_all_vfs(adapter, vf_cfg, vf) {
3086 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3087 if (status)
3088 goto err;
3089 }
3090 } else {
3091 status = be_vfs_if_create(adapter);
3092 if (status)
3093 goto err;
3094 }
3095
3096 if (old_vfs) {
3097 status = be_vfs_mac_query(adapter);
3098 if (status)
3099 goto err;
3100 } else {
3101 status = be_vf_eth_addr_config(adapter);
3102 if (status)
3103 goto err;
3104 }
3105
3106 for_all_vfs(adapter, vf_cfg, vf) {
3107 /* Allow VFs to program MAC/VLAN filters */
3108 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3109 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3110 status = be_cmd_set_fn_privileges(adapter,
3111 privileges |
3112 BE_PRIV_FILTMGMT,
3113 vf + 1);
3114 if (!status)
3115 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3116 vf);
3117 }
3118
3119 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3120 * Allow full available bandwidth
3121 */
3122 if (BE3_chip(adapter) && !old_vfs)
3123 be_cmd_config_qos(adapter, 1000, vf + 1);
3124
3125 status = be_cmd_link_status_query(adapter, &lnk_speed,
3126 NULL, vf + 1);
3127 if (!status)
3128 vf_cfg->tx_rate = lnk_speed;
3129
3130 if (!old_vfs) {
3131 be_cmd_enable_vf(adapter, vf + 1);
3132 be_cmd_set_logical_link_config(adapter,
3133 IFLA_VF_LINK_STATE_AUTO,
3134 vf+1);
3135 }
3136 }
3137
3138 if (!old_vfs) {
3139 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3140 if (status) {
3141 dev_err(dev, "SRIOV enable failed\n");
3142 adapter->num_vfs = 0;
3143 goto err;
3144 }
3145 }
3146 return 0;
3147 err:
3148 dev_err(dev, "VF setup failed\n");
3149 be_vf_clear(adapter);
3150 return status;
3151 }
3152
3153 /* Converting function_mode bits on BE3 to SH mc_type enums */
3154
3155 static u8 be_convert_mc_type(u32 function_mode)
3156 {
3157 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3158 return vNIC1;
3159 else if (function_mode & FLEX10_MODE)
3160 return FLEX10;
3161 else if (function_mode & VNIC_MODE)
3162 return vNIC2;
3163 else if (function_mode & UMC_ENABLED)
3164 return UMC;
3165 else
3166 return MC_NONE;
3167 }
3168
3169 /* On BE2/BE3 the FW does not report the supported resource limits */
3170 static void BEx_get_resources(struct be_adapter *adapter,
3171 struct be_resources *res)
3172 {
3173 struct pci_dev *pdev = adapter->pdev;
3174 bool use_sriov = false;
3175 int max_vfs = 0;
3176
3177 if (be_physfn(adapter) && BE3_chip(adapter)) {
3178 be_cmd_get_profile_config(adapter, res, 0);
3179 /* Some old versions of BE3 FW don't report max_vfs value */
3180 if (res->max_vfs == 0) {
3181 max_vfs = pci_sriov_get_totalvfs(pdev);
3182 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3183 }
3184 use_sriov = res->max_vfs && sriov_want(adapter);
3185 }
3186
3187 if (be_physfn(adapter))
3188 res->max_uc_mac = BE_UC_PMAC_COUNT;
3189 else
3190 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3191
3192 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3193
3194 if (be_is_mc(adapter)) {
3195 /* Assuming that there are 4 channels per port,
3196 * when multi-channel is enabled
3197 */
3198 if (be_is_qnq_mode(adapter))
3199 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3200 else
3201 /* In a non-qnq multichannel mode, the pvid
3202 * takes up one vlan entry
3203 */
3204 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3205 } else {
3206 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3207 }
3208
3209 res->max_mcast_mac = BE_MAX_MC;
3210
3211 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3212 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3213 * *only* if it is RSS-capable.
3214 */
3215 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3216 !be_physfn(adapter) || (be_is_mc(adapter) &&
3217 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3218 res->max_tx_qs = 1;
3219 else
3220 res->max_tx_qs = BE3_MAX_TX_QS;
3221
3222 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3223 !use_sriov && be_physfn(adapter))
3224 res->max_rss_qs = (adapter->be3_native) ?
3225 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3226 res->max_rx_qs = res->max_rss_qs + 1;
3227
3228 if (be_physfn(adapter))
3229 res->max_evt_qs = (res->max_vfs > 0) ?
3230 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3231 else
3232 res->max_evt_qs = 1;
3233
3234 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3235 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3236 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3237 }
3238
3239 static void be_setup_init(struct be_adapter *adapter)
3240 {
3241 adapter->vlan_prio_bmap = 0xff;
3242 adapter->phy.link_speed = -1;
3243 adapter->if_handle = -1;
3244 adapter->be3_native = false;
3245 adapter->promiscuous = false;
3246 if (be_physfn(adapter))
3247 adapter->cmd_privileges = MAX_PRIVILEGES;
3248 else
3249 adapter->cmd_privileges = MIN_PRIVILEGES;
3250 }
3251
3252 static int be_get_resources(struct be_adapter *adapter)
3253 {
3254 struct device *dev = &adapter->pdev->dev;
3255 struct be_resources res = {0};
3256 int status;
3257
3258 if (BEx_chip(adapter)) {
3259 BEx_get_resources(adapter, &res);
3260 adapter->res = res;
3261 }
3262
3263 /* For Lancer, SH etc., read per-function resource limits from FW.
3264 * GET_FUNC_CONFIG returns per-function guaranteed limits.
3265 * GET_PROFILE_CONFIG returns PCI-E related and PF-pool limits.
3266 */
3267 if (!BEx_chip(adapter)) {
3268 status = be_cmd_get_func_config(adapter, &res);
3269 if (status)
3270 return status;
3271
3272 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3273 if (be_roce_supported(adapter))
3274 res.max_evt_qs /= 2;
3275 adapter->res = res;
3276
3277 if (be_physfn(adapter)) {
3278 status = be_cmd_get_profile_config(adapter, &res, 0);
3279 if (status)
3280 return status;
3281 adapter->res.max_vfs = res.max_vfs;
3282 }
3283
3284 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3285 be_max_txqs(adapter), be_max_rxqs(adapter),
3286 be_max_rss(adapter), be_max_eqs(adapter),
3287 be_max_vfs(adapter));
3288 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3289 be_max_uc(adapter), be_max_mc(adapter),
3290 be_max_vlans(adapter));
3291 }
3292
3293 return 0;
3294 }
3295
3296 /* Routine to query per function resource limits */
3297 static int be_get_config(struct be_adapter *adapter)
3298 {
3299 u16 profile_id;
3300 int status;
3301
3302 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3303 &adapter->function_mode,
3304 &adapter->function_caps,
3305 &adapter->asic_rev);
3306 if (status)
3307 return status;
3308
3309 if (be_physfn(adapter)) {
3310 status = be_cmd_get_active_profile(adapter, &profile_id);
3311 if (!status)
3312 dev_info(&adapter->pdev->dev,
3313 "Using profile 0x%x\n", profile_id);
3314 }
3315
3316 status = be_get_resources(adapter);
3317 if (status)
3318 return status;
3319
3320 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3321 sizeof(*adapter->pmac_id), GFP_KERNEL);
3322 if (!adapter->pmac_id)
3323 return -ENOMEM;
3324
3325 /* Sanitize cfg_num_qs based on HW and platform limits */
3326 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3327
3328 return 0;
3329 }
3330
3331 static int be_mac_setup(struct be_adapter *adapter)
3332 {
3333 u8 mac[ETH_ALEN];
3334 int status;
3335
3336 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3337 status = be_cmd_get_perm_mac(adapter, mac);
3338 if (status)
3339 return status;
3340
3341 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3342 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3343 } else {
3344 /* Maybe the HW was reset; dev_addr must be re-programmed */
3345 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3346 }
3347
3348 /* For BE3-R VFs, the PF programs the initial MAC address */
3349 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3350 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3351 &adapter->pmac_id[0], 0);
3352 return 0;
3353 }
3354
3355 static void be_schedule_worker(struct be_adapter *adapter)
3356 {
3357 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3358 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3359 }
3360
3361 static int be_setup_queues(struct be_adapter *adapter)
3362 {
3363 struct net_device *netdev = adapter->netdev;
3364 int status;
3365
3366 status = be_evt_queues_create(adapter);
3367 if (status)
3368 goto err;
3369
3370 status = be_tx_qs_create(adapter);
3371 if (status)
3372 goto err;
3373
3374 status = be_rx_cqs_create(adapter);
3375 if (status)
3376 goto err;
3377
3378 status = be_mcc_queues_create(adapter);
3379 if (status)
3380 goto err;
3381
3382 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3383 if (status)
3384 goto err;
3385
3386 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3387 if (status)
3388 goto err;
3389
3390 return 0;
3391 err:
3392 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3393 return status;
3394 }
3395
3396 int be_update_queues(struct be_adapter *adapter)
3397 {
3398 struct net_device *netdev = adapter->netdev;
3399 int status;
3400
3401 if (netif_running(netdev))
3402 be_close(netdev);
3403
3404 be_cancel_worker(adapter);
3405
3406 /* If any vectors have been shared with RoCE we cannot re-program
3407 * the MSIx table.
3408 */
3409 if (!adapter->num_msix_roce_vec)
3410 be_msix_disable(adapter);
3411
3412 be_clear_queues(adapter);
3413
3414 if (!msix_enabled(adapter)) {
3415 status = be_msix_enable(adapter);
3416 if (status)
3417 return status;
3418 }
3419
3420 status = be_setup_queues(adapter);
3421 if (status)
3422 return status;
3423
3424 be_schedule_worker(adapter);
3425
3426 if (netif_running(netdev))
3427 status = be_open(netdev);
3428
3429 return status;
3430 }
3431
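/* Main adapter setup: query config, enable MSI-X, create the interface
 * and queues, program the MAC, VLANs, rx-mode and flow-control settings,
 * and optionally set up SR-IOV.
 */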
3432 static int be_setup(struct be_adapter *adapter)
3433 {
3434 struct device *dev = &adapter->pdev->dev;
3435 u32 tx_fc, rx_fc, en_flags;
3436 int status;
3437
3438 be_setup_init(adapter);
3439
3440 if (!lancer_chip(adapter))
3441 be_cmd_req_native_mode(adapter);
3442
3443 status = be_get_config(adapter);
3444 if (status)
3445 goto err;
3446
3447 status = be_msix_enable(adapter);
3448 if (status)
3449 goto err;
3450
3451 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3452 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3453 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3454 en_flags |= BE_IF_FLAGS_RSS;
3455 en_flags = en_flags & be_if_cap_flags(adapter);
3456 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3457 &adapter->if_handle, 0);
3458 if (status)
3459 goto err;
3460
3461 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3462 rtnl_lock();
3463 status = be_setup_queues(adapter);
3464 rtnl_unlock();
3465 if (status)
3466 goto err;
3467
3468 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3469
3470 status = be_mac_setup(adapter);
3471 if (status)
3472 goto err;
3473
3474 be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3475
3476 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3477 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3478 adapter->fw_ver);
3479 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3480 }
3481
3482 if (adapter->vlans_added)
3483 be_vid_config(adapter);
3484
3485 be_set_rx_mode(adapter->netdev);
3486
3487 be_cmd_get_acpi_wol_cap(adapter);
3488
3489 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3490
3491 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3492 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3493 adapter->rx_fc);
3494
3495 if (be_physfn(adapter))
3496 be_cmd_set_logical_link_config(adapter,
3497 IFLA_VF_LINK_STATE_AUTO, 0);
3498
3499 if (sriov_want(adapter)) {
3500 if (be_max_vfs(adapter))
3501 be_vf_setup(adapter);
3502 else
3503 dev_warn(dev, "device doesn't support SRIOV\n");
3504 }
3505
3506 status = be_cmd_get_phy_info(adapter);
3507 if (!status && be_pause_supported(adapter))
3508 adapter->phy.fc_autoneg = 1;
3509
3510 be_schedule_worker(adapter);
3511 return 0;
3512 err:
3513 be_clear(adapter);
3514 return status;
3515 }
3516
3517 #ifdef CONFIG_NET_POLL_CONTROLLER
3518 static void be_netpoll(struct net_device *netdev)
3519 {
3520 struct be_adapter *adapter = netdev_priv(netdev);
3521 struct be_eq_obj *eqo;
3522 int i;
3523
3524 for_all_evt_queues(adapter, eqo, i) {
3525 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3526 napi_schedule(&eqo->napi);
3527 }
3528
3529 return;
3530 }
3531 #endif
3532
3533 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
3534 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3535
3536 static bool be_flash_redboot(struct be_adapter *adapter,
3537 const u8 *p, u32 img_start, int image_size,
3538 int hdr_size)
3539 {
3540 u32 crc_offset;
3541 u8 flashed_crc[4];
3542 int status;
3543
3544 crc_offset = hdr_size + img_start + image_size - 4;
3545
3546 p += crc_offset;
3547
3548 status = be_cmd_get_flash_crc(adapter, flashed_crc,
3549 (image_size - 4));
3550 if (status) {
3551 dev_err(&adapter->pdev->dev,
3552 "could not get crc from flash, not flashing redboot\n");
3553 return false;
3554 }
3555
3556 /* Update redboot only if the CRC does not match */
3557 if (!memcmp(flashed_crc, p, 4))
3558 return false;
3559 else
3560 return true;
3561 }
3562
3563 static bool phy_flashing_required(struct be_adapter *adapter)
3564 {
3565 return (adapter->phy.phy_type == TN_8022 &&
3566 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3567 }
3568
3569 static bool is_comp_in_ufi(struct be_adapter *adapter,
3570 struct flash_section_info *fsec, int type)
3571 {
3572 int i = 0, img_type = 0;
3573 struct flash_section_info_g2 *fsec_g2 = NULL;
3574
3575 if (BE2_chip(adapter))
3576 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3577
3578 for (i = 0; i < MAX_FLASH_COMP; i++) {
3579 if (fsec_g2)
3580 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3581 else
3582 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3583
3584 if (img_type == type)
3585 return true;
3586 }
3587 return false;
3588
3589 }
3590
3591 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3592 int header_size,
3593 const struct firmware *fw)
3594 {
3595 struct flash_section_info *fsec = NULL;
3596 const u8 *p = fw->data;
3597
3598 p += header_size;
3599 while (p < (fw->data + fw->size)) {
3600 fsec = (struct flash_section_info *)p;
3601 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3602 return fsec;
3603 p += 32;
3604 }
3605 return NULL;
3606 }
3607
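/* Write one firmware image to flash in 32KB chunks, issuing a SAVE
 * operation for intermediate chunks and a FLASH operation for the last.
 */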
3608 static int be_flash(struct be_adapter *adapter, const u8 *img,
3609 struct be_dma_mem *flash_cmd, int optype, int img_size)
3610 {
3611 u32 total_bytes = 0, flash_op, num_bytes = 0;
3612 int status = 0;
3613 struct be_cmd_write_flashrom *req = flash_cmd->va;
3614
3615 total_bytes = img_size;
3616 while (total_bytes) {
3617 num_bytes = min_t(u32, 32*1024, total_bytes);
3618
3619 total_bytes -= num_bytes;
3620
3621 if (!total_bytes) {
3622 if (optype == OPTYPE_PHY_FW)
3623 flash_op = FLASHROM_OPER_PHY_FLASH;
3624 else
3625 flash_op = FLASHROM_OPER_FLASH;
3626 } else {
3627 if (optype == OPTYPE_PHY_FW)
3628 flash_op = FLASHROM_OPER_PHY_SAVE;
3629 else
3630 flash_op = FLASHROM_OPER_SAVE;
3631 }
3632
3633 memcpy(req->data_buf, img, num_bytes);
3634 img += num_bytes;
3635 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3636 flash_op, num_bytes);
3637 if (status) {
3638 if (status == ILLEGAL_IOCTL_REQ &&
3639 optype == OPTYPE_PHY_FW)
3640 break;
3641 dev_err(&adapter->pdev->dev,
3642 "cmd to write to flash rom failed.\n");
3643 return status;
3644 }
3645 }
3646 return 0;
3647 }
3648
3649 /* For BE2, BE3 and BE3-R */
3650 static int be_flash_BEx(struct be_adapter *adapter,
3651 const struct firmware *fw,
3652 struct be_dma_mem *flash_cmd,
3653 int num_of_images)
3654
3655 {
3656 int status = 0, i, filehdr_size = 0;
3657 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3658 const u8 *p = fw->data;
3659 const struct flash_comp *pflashcomp;
3660 int num_comp, redboot;
3661 struct flash_section_info *fsec = NULL;
3662
3663 struct flash_comp gen3_flash_types[] = {
3664 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3665 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3666 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3667 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3668 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3669 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3670 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3671 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3672 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3673 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3674 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3675 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3676 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3677 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3678 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3679 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3680 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3681 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3682 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3683 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3684 };
3685
3686 struct flash_comp gen2_flash_types[] = {
3687 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3688 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3689 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3690 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3691 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3692 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3693 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3694 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3695 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3696 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3697 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3698 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3699 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3700 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3701 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3702 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3703 };
3704
3705 if (BE3_chip(adapter)) {
3706 pflashcomp = gen3_flash_types;
3707 filehdr_size = sizeof(struct flash_file_hdr_g3);
3708 num_comp = ARRAY_SIZE(gen3_flash_types);
3709 } else {
3710 pflashcomp = gen2_flash_types;
3711 filehdr_size = sizeof(struct flash_file_hdr_g2);
3712 num_comp = ARRAY_SIZE(gen2_flash_types);
3713 }
3714
3715 /* Get flash section info */
3716 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3717 if (!fsec) {
3718 dev_err(&adapter->pdev->dev,
3719 "Invalid Cookie. UFI corrupted ?\n");
3720 return -1;
3721 }
3722 for (i = 0; i < num_comp; i++) {
3723 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3724 continue;
3725
3726 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3727 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3728 continue;
3729
3730 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3731 !phy_flashing_required(adapter))
3732 continue;
3733
3734 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3735 redboot = be_flash_redboot(adapter, fw->data,
3736 pflashcomp[i].offset, pflashcomp[i].size,
3737 filehdr_size + img_hdrs_size);
3738 if (!redboot)
3739 continue;
3740 }
3741
3742 p = fw->data;
3743 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3744 if (p + pflashcomp[i].size > fw->data + fw->size)
3745 return -1;
3746
3747 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3748 pflashcomp[i].size);
3749 if (status) {
3750 dev_err(&adapter->pdev->dev,
3751 "Flashing section type %d failed.\n",
3752 pflashcomp[i].img_type);
3753 return status;
3754 }
3755 }
3756 return 0;
3757 }
3758
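/* Flash a Skyhawk UFI: walk the flash section entries, map each image
 * type to its flash optype and write the images present in the file,
 * skipping redboot unless its CRC differs from what is already flashed.
 */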
3759 static int be_flash_skyhawk(struct be_adapter *adapter,
3760 const struct firmware *fw,
3761 struct be_dma_mem *flash_cmd, int num_of_images)
3762 {
3763 int status = 0, i, filehdr_size = 0;
3764 int img_offset, img_size, img_optype, redboot;
3765 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3766 const u8 *p = fw->data;
3767 struct flash_section_info *fsec = NULL;
3768
3769 filehdr_size = sizeof(struct flash_file_hdr_g3);
3770 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3771 if (!fsec) {
3772 dev_err(&adapter->pdev->dev,
3773 "Invalid Cookie. UFI corrupted ?\n");
3774 return -1;
3775 }
3776
3777 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3778 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3779 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3780
3781 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3782 case IMAGE_FIRMWARE_iSCSI:
3783 img_optype = OPTYPE_ISCSI_ACTIVE;
3784 break;
3785 case IMAGE_BOOT_CODE:
3786 img_optype = OPTYPE_REDBOOT;
3787 break;
3788 case IMAGE_OPTION_ROM_ISCSI:
3789 img_optype = OPTYPE_BIOS;
3790 break;
3791 case IMAGE_OPTION_ROM_PXE:
3792 img_optype = OPTYPE_PXE_BIOS;
3793 break;
3794 case IMAGE_OPTION_ROM_FCoE:
3795 img_optype = OPTYPE_FCOE_BIOS;
3796 break;
3797 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3798 img_optype = OPTYPE_ISCSI_BACKUP;
3799 break;
3800 case IMAGE_NCSI:
3801 img_optype = OPTYPE_NCSI_FW;
3802 break;
3803 default:
3804 continue;
3805 }
3806
3807 if (img_optype == OPTYPE_REDBOOT) {
3808 redboot = be_flash_redboot(adapter, fw->data,
3809 img_offset, img_size,
3810 filehdr_size + img_hdrs_size);
3811 if (!redboot)
3812 continue;
3813 }
3814
3815 p = fw->data;
3816 p += filehdr_size + img_offset + img_hdrs_size;
3817 if (p + img_size > fw->data + fw->size)
3818 return -1;
3819
3820 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3821 if (status) {
3822 dev_err(&adapter->pdev->dev,
3823 "Flashing section type %d failed.\n",
3824 le32_to_cpu(fsec->fsec_entry[i].type));
3825 return status;
3826 }
3827 }
3828 return 0;
3829 }
3830
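/* Download a firmware image to a Lancer adapter. The image is copied into a
 * DMA buffer in 32KB chunks and written to the "/prg" flash object; a final
 * zero-length write commits it. If the firmware reports that a reset is
 * needed to activate the new image, the adapter is reset here.
 */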
3831 static int lancer_fw_download(struct be_adapter *adapter,
3832 const struct firmware *fw)
3833 {
3834 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
3835 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
3836 struct be_dma_mem flash_cmd;
3837 const u8 *data_ptr = NULL;
3838 u8 *dest_image_ptr = NULL;
3839 size_t image_size = 0;
3840 u32 chunk_size = 0;
3841 u32 data_written = 0;
3842 u32 offset = 0;
3843 int status = 0;
3844 u8 add_status = 0;
3845 u8 change_status;
3846
3847 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3848 dev_err(&adapter->pdev->dev,
3849 "FW Image not properly aligned. "
3850 "Length must be 4 byte aligned.\n");
3851 status = -EINVAL;
3852 goto lancer_fw_exit;
3853 }
3854
3855 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3856 + LANCER_FW_DOWNLOAD_CHUNK;
3857 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3858 &flash_cmd.dma, GFP_KERNEL);
3859 if (!flash_cmd.va) {
3860 status = -ENOMEM;
3861 goto lancer_fw_exit;
3862 }
3863
3864 dest_image_ptr = flash_cmd.va +
3865 sizeof(struct lancer_cmd_req_write_object);
3866 image_size = fw->size;
3867 data_ptr = fw->data;
3868
3869 while (image_size) {
3870 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3871
3872 /* Copy the image chunk content. */
3873 memcpy(dest_image_ptr, data_ptr, chunk_size);
3874
3875 status = lancer_cmd_write_object(adapter, &flash_cmd,
3876 chunk_size, offset,
3877 LANCER_FW_DOWNLOAD_LOCATION,
3878 &data_written, &change_status,
3879 &add_status);
3880 if (status)
3881 break;
3882
3883 offset += data_written;
3884 data_ptr += data_written;
3885 image_size -= data_written;
3886 }
3887
3888 if (!status) {
3889 /* Commit the FW written */
3890 status = lancer_cmd_write_object(adapter, &flash_cmd,
3891 0, offset,
3892 LANCER_FW_DOWNLOAD_LOCATION,
3893 &data_written, &change_status,
3894 &add_status);
3895 }
3896
3897 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3898 flash_cmd.dma);
3899 if (status) {
3900 dev_err(&adapter->pdev->dev,
3901 "Firmware load error. "
3902 "Status code: 0x%x Additional Status: 0x%x\n",
3903 status, add_status);
3904 goto lancer_fw_exit;
3905 }
3906
3907 if (change_status == LANCER_FW_RESET_NEEDED) {
3908 dev_info(&adapter->pdev->dev,
3909 "Resetting adapter to activate new FW\n");
3910 status = lancer_physdev_ctrl(adapter,
3911 PHYSDEV_CONTROL_FW_RESET_MASK);
3912 if (status) {
3913 dev_err(&adapter->pdev->dev,
3914 "Adapter busy for FW reset.\n"
3915 "New FW will not be active.\n");
3916 goto lancer_fw_exit;
3917 }
3918 } else if (change_status != LANCER_NO_RESET_NEEDED) {
3919 dev_err(&adapter->pdev->dev,
3920 "System reboot required for new FW"
3921 " to be active\n");
3922 }
3923
3924 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3925 lancer_fw_exit:
3926 return status;
3927 }
3928
3929 #define UFI_TYPE2 2
3930 #define UFI_TYPE3 3
3931 #define UFI_TYPE3R 10
3932 #define UFI_TYPE4 4
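/* Identify the UFI image type from the flash file header and check that it
 * matches the adapter family (BE2/BE3/BE3-R/Skyhawk). Returns -1 if the
 * image is not compatible with this interface.
 */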
3933 static int be_get_ufi_type(struct be_adapter *adapter,
3934 struct flash_file_hdr_g3 *fhdr)
3935 {
3936 if (fhdr == NULL)
3937 goto be_get_ufi_exit;
3938
3939 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3940 return UFI_TYPE4;
3941 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3942 if (fhdr->asic_type_rev == 0x10)
3943 return UFI_TYPE3R;
3944 else
3945 return UFI_TYPE3;
3946 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3947 return UFI_TYPE2;
3948
3949 be_get_ufi_exit:
3950 dev_err(&adapter->pdev->dev,
3951 "UFI and Interface are not compatible for flashing\n");
3952 return -1;
3953 }
3954
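/* Flash a UFI image on BE2/BE3/Skyhawk adapters: identify the UFI type from
 * the file header and, for each image header carrying image-id 1, invoke the
 * flashing routine that matches the adapter generation.
 */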
3955 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3956 {
3957 struct flash_file_hdr_g3 *fhdr3;
3958 struct image_hdr *img_hdr_ptr = NULL;
3959 struct be_dma_mem flash_cmd;
3960 const u8 *p;
3961 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3962
3963 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3964 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3965 &flash_cmd.dma, GFP_KERNEL);
3966 if (!flash_cmd.va) {
3967 status = -ENOMEM;
3968 goto be_fw_exit;
3969 }
3970
3971 p = fw->data;
3972 fhdr3 = (struct flash_file_hdr_g3 *)p;
3973
3974 ufi_type = be_get_ufi_type(adapter, fhdr3);
3975
3976 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3977 for (i = 0; i < num_imgs; i++) {
3978 img_hdr_ptr = (struct image_hdr *)(fw->data +
3979 (sizeof(struct flash_file_hdr_g3) +
3980 i * sizeof(struct image_hdr)));
3981 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3982 switch (ufi_type) {
3983 case UFI_TYPE4:
3984 status = be_flash_skyhawk(adapter, fw,
3985 &flash_cmd, num_imgs);
3986 break;
3987 case UFI_TYPE3R:
3988 status = be_flash_BEx(adapter, fw, &flash_cmd,
3989 num_imgs);
3990 break;
3991 case UFI_TYPE3:
3992 /* Do not flash this ufi on BE3-R cards */
3993 if (adapter->asic_rev < 0x10)
3994 status = be_flash_BEx(adapter, fw,
3995 &flash_cmd,
3996 num_imgs);
3997 else {
3998 status = -1;
3999 dev_err(&adapter->pdev->dev,
4000 "Can't load BE3 UFI on BE3R\n");
4001 }
4002 }
4003 }
4004 }
4005
4006 if (ufi_type == UFI_TYPE2)
4007 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4008 else if (ufi_type == -1)
4009 status = -1;
4010
4011 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4012 flash_cmd.dma);
4013 if (status) {
4014 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4015 goto be_fw_exit;
4016 }
4017
4018 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4019
4020 be_fw_exit:
4021 return status;
4022 }
4023
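/* Top-level firmware flashing entry point: requires the interface to be up,
 * requests the firmware file from userspace and dispatches to the Lancer or
 * BE/Skyhawk download path. The cached FW version is refreshed on success.
 */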
4024 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4025 {
4026 const struct firmware *fw;
4027 int status;
4028
4029 if (!netif_running(adapter->netdev)) {
4030 dev_err(&adapter->pdev->dev,
4031 "Firmware load not allowed (interface is down)\n");
4032 return -1;
4033 }
4034
4035 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4036 if (status)
4037 goto fw_exit;
4038
4039 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4040
4041 if (lancer_chip(adapter))
4042 status = lancer_fw_download(adapter, fw);
4043 else
4044 status = be_fw_download(adapter, fw);
4045
4046 if (!status)
4047 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
4048 adapter->fw_on_flash);
4049
4050 fw_exit:
4051 release_firmware(fw);
4052 return status;
4053 }
4054
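/* Set the e-switch port forwarding mode (VEB or VEPA) from the
 * IFLA_BRIDGE_MODE attribute in the netlink request. Only supported when
 * SR-IOV is enabled.
 */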
4055 static int be_ndo_bridge_setlink(struct net_device *dev,
4056 struct nlmsghdr *nlh)
4057 {
4058 struct be_adapter *adapter = netdev_priv(dev);
4059 struct nlattr *attr, *br_spec;
4060 int rem;
4061 int status = 0;
4062 u16 mode = 0;
4063
4064 if (!sriov_enabled(adapter))
4065 return -EOPNOTSUPP;
4066
4067 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
     if (!br_spec)
     return -EINVAL;
4068
4069 nla_for_each_nested(attr, br_spec, rem) {
4070 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4071 continue;
4072
4073 mode = nla_get_u16(attr);
4074 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4075 return -EINVAL;
4076
4077 status = be_cmd_set_hsw_config(adapter, 0, 0,
4078 adapter->if_handle,
4079 mode == BRIDGE_MODE_VEPA ?
4080 PORT_FWD_TYPE_VEPA :
4081 PORT_FWD_TYPE_VEB);
4082 if (status)
4083 goto err;
4084
4085 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4086 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4087
4088 return status;
4089 }
4090 err:
4091 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4092 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4093
4094 return status;
4095 }
4096
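/* Report the current e-switch port forwarding mode. BEx and Lancer chips
 * support only VEB; other chips are queried via be_cmd_get_hsw_config().
 */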
4097 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4098 struct net_device *dev,
4099 u32 filter_mask)
4100 {
4101 struct be_adapter *adapter = netdev_priv(dev);
4102 int status = 0;
4103 u8 hsw_mode;
4104
4105 if (!sriov_enabled(adapter))
4106 return 0;
4107
4108 /* BE and Lancer chips support VEB mode only */
4109 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4110 hsw_mode = PORT_FWD_TYPE_VEB;
4111 } else {
4112 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4113 adapter->if_handle, &hsw_mode);
4114 if (status)
4115 return 0;
4116 }
4117
4118 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4119 hsw_mode == PORT_FWD_TYPE_VEPA ?
4120 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4121 }
4122
4123 static const struct net_device_ops be_netdev_ops = {
4124 .ndo_open = be_open,
4125 .ndo_stop = be_close,
4126 .ndo_start_xmit = be_xmit,
4127 .ndo_set_rx_mode = be_set_rx_mode,
4128 .ndo_set_mac_address = be_mac_addr_set,
4129 .ndo_change_mtu = be_change_mtu,
4130 .ndo_get_stats64 = be_get_stats64,
4131 .ndo_validate_addr = eth_validate_addr,
4132 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4133 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
4134 .ndo_set_vf_mac = be_set_vf_mac,
4135 .ndo_set_vf_vlan = be_set_vf_vlan,
4136 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4137 .ndo_get_vf_config = be_get_vf_config,
4138 .ndo_set_vf_link_state = be_set_vf_link_state,
4139 #ifdef CONFIG_NET_POLL_CONTROLLER
4140 .ndo_poll_controller = be_netpoll,
4141 #endif
4142 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4143 .ndo_bridge_getlink = be_ndo_bridge_getlink,
4144 #ifdef CONFIG_NET_RX_BUSY_POLL
4145 .ndo_busy_poll = be_busy_poll
4146 #endif
4147 };
4148
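/* Advertise the offload features (checksum, TSO, VLAN, RX hashing) supported
 * by the adapter and attach the netdev and ethtool operations.
 */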
4149 static void be_netdev_init(struct net_device *netdev)
4150 {
4151 struct be_adapter *adapter = netdev_priv(netdev);
4152
4153 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4154 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4155 NETIF_F_HW_VLAN_CTAG_TX;
4156 if (be_multi_rxq(adapter))
4157 netdev->hw_features |= NETIF_F_RXHASH;
4158
4159 netdev->features |= netdev->hw_features |
4160 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4161
4162 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4163 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4164
4165 netdev->priv_flags |= IFF_UNICAST_FLT;
4166
4167 netdev->flags |= IFF_MULTICAST;
4168
4169 netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4170
4171 netdev->netdev_ops = &be_netdev_ops;
4172
4173 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4174 }
4175
4176 static void be_unmap_pci_bars(struct be_adapter *adapter)
4177 {
4178 if (adapter->csr)
4179 pci_iounmap(adapter->pdev, adapter->csr);
4180 if (adapter->db)
4181 pci_iounmap(adapter->pdev, adapter->db);
4182 }
4183
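/* Doorbell registers live in BAR 0 on Lancer and on VFs, and in BAR 4 on
 * BE2/BE3/Skyhawk physical functions.
 */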
4184 static int db_bar(struct be_adapter *adapter)
4185 {
4186 if (lancer_chip(adapter) || !be_physfn(adapter))
4187 return 0;
4188 else
4189 return 4;
4190 }
4191
4192 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4193 {
4194 if (skyhawk_chip(adapter)) {
4195 adapter->roce_db.size = 4096;
4196 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4197 db_bar(adapter));
4198 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4199 db_bar(adapter));
4200 }
4201 return 0;
4202 }
4203
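/* Map the PCI BARs needed by the driver: the CSR BAR (2) on BEx physical
 * functions and the doorbell BAR; on Skyhawk the RoCE doorbell region is
 * also recorded for the RoCE driver.
 */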
4204 static int be_map_pci_bars(struct be_adapter *adapter)
4205 {
4206 u8 __iomem *addr;
4207
4208 if (BEx_chip(adapter) && be_physfn(adapter)) {
4209 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4210 if (adapter->csr == NULL)
4211 return -ENOMEM;
4212 }
4213
4214 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4215 if (addr == NULL)
4216 goto pci_map_err;
4217 adapter->db = addr;
4218
4219 be_roce_map_pci_bars(adapter);
4220 return 0;
4221
4222 pci_map_err:
4223 be_unmap_pci_bars(adapter);
4224 return -ENOMEM;
4225 }
4226
4227 static void be_ctrl_cleanup(struct be_adapter *adapter)
4228 {
4229 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4230
4231 be_unmap_pci_bars(adapter);
4232
4233 if (mem->va)
4234 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4235 mem->dma);
4236
4237 mem = &adapter->rx_filter;
4238 if (mem->va)
4239 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4240 mem->dma);
4241 }
4242
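/* Set up the control path: read the SLI interface register, map the PCI BARs
 * and allocate the DMA memory used for the mailbox (16-byte aligned) and the
 * RX filter command, then initialize the mailbox/MCC locks.
 */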
4243 static int be_ctrl_init(struct be_adapter *adapter)
4244 {
4245 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4246 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4247 struct be_dma_mem *rx_filter = &adapter->rx_filter;
4248 u32 sli_intf;
4249 int status;
4250
4251 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4252 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4253 SLI_INTF_FAMILY_SHIFT;
4254 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4255
4256 status = be_map_pci_bars(adapter);
4257 if (status)
4258 goto done;
4259
4260 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4261 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4262 mbox_mem_alloc->size,
4263 &mbox_mem_alloc->dma,
4264 GFP_KERNEL);
4265 if (!mbox_mem_alloc->va) {
4266 status = -ENOMEM;
4267 goto unmap_pci_bars;
4268 }
4269 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4270 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4271 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4272 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4273
4274 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4275 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4276 rx_filter->size, &rx_filter->dma,
4277 GFP_KERNEL);
4278 if (rx_filter->va == NULL) {
4279 status = -ENOMEM;
4280 goto free_mbox;
4281 }
4282
4283 mutex_init(&adapter->mbox_lock);
4284 spin_lock_init(&adapter->mcc_lock);
4285 spin_lock_init(&adapter->mcc_cq_lock);
4286
4287 init_completion(&adapter->et_cmd_compl);
4288 pci_save_state(adapter->pdev);
4289 return 0;
4290
4291 free_mbox:
4292 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4293 mbox_mem_alloc->va, mbox_mem_alloc->dma);
4294
4295 unmap_pci_bars:
4296 be_unmap_pci_bars(adapter);
4297
4298 done:
4299 return status;
4300 }
4301
4302 static void be_stats_cleanup(struct be_adapter *adapter)
4303 {
4304 struct be_dma_mem *cmd = &adapter->stats_cmd;
4305
4306 if (cmd->va)
4307 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4308 cmd->va, cmd->dma);
4309 }
4310
4311 static int be_stats_init(struct be_adapter *adapter)
4312 {
4313 struct be_dma_mem *cmd = &adapter->stats_cmd;
4314
4315 if (lancer_chip(adapter))
4316 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4317 else if (BE2_chip(adapter))
4318 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4319 else if (BE3_chip(adapter))
4320 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4321 else
4322 /* ALL non-BE ASICs */
4323 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4324
4325 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4326 GFP_KERNEL);
4327 if (cmd->va == NULL)
4328 return -1;
4329 return 0;
4330 }
4331
4332 static void be_remove(struct pci_dev *pdev)
4333 {
4334 struct be_adapter *adapter = pci_get_drvdata(pdev);
4335
4336 if (!adapter)
4337 return;
4338
4339 be_roce_dev_remove(adapter);
4340 be_intr_set(adapter, false);
4341
4342 cancel_delayed_work_sync(&adapter->func_recovery_work);
4343
4344 unregister_netdev(adapter->netdev);
4345
4346 be_clear(adapter);
4347
4348 /* tell fw we're done with firing cmds */
4349 be_cmd_fw_clean(adapter);
4350
4351 be_stats_cleanup(adapter);
4352
4353 be_ctrl_cleanup(adapter);
4354
4355 pci_disable_pcie_error_reporting(pdev);
4356
4357 pci_release_regions(pdev);
4358 pci_disable_device(pdev);
4359
4360 free_netdev(adapter->netdev);
4361 }
4362
4363 static int be_get_initial_config(struct be_adapter *adapter)
4364 {
4365 int status, level;
4366
4367 status = be_cmd_get_cntl_attributes(adapter);
4368 if (status)
4369 return status;
4370
4371 /* Must be a power of 2 or else MODULO will BUG_ON */
4372 adapter->be_get_temp_freq = 64;
4373
4374 if (BEx_chip(adapter)) {
4375 level = be_cmd_get_fw_log_level(adapter);
4376 adapter->msg_enable =
4377 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4378 }
4379
4380 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4381 return 0;
4382 }
4383
4384 static int lancer_recover_func(struct be_adapter *adapter)
4385 {
4386 struct device *dev = &adapter->pdev->dev;
4387 int status;
4388
4389 status = lancer_test_and_set_rdy_state(adapter);
4390 if (status)
4391 goto err;
4392
4393 if (netif_running(adapter->netdev))
4394 be_close(adapter->netdev);
4395
4396 be_clear(adapter);
4397
4398 be_clear_all_error(adapter);
4399
4400 status = be_setup(adapter);
4401 if (status)
4402 goto err;
4403
4404 if (netif_running(adapter->netdev)) {
4405 status = be_open(adapter->netdev);
4406 if (status)
4407 goto err;
4408 }
4409
4410 dev_err(dev, "Adapter recovery successful\n");
4411 return 0;
4412 err:
4413 if (status == -EAGAIN)
4414 dev_err(dev, "Waiting for resource provisioning\n");
4415 else
4416 dev_err(dev, "Adapter recovery failed\n");
4417
4418 return status;
4419 }
4420
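/* Periodic error-recovery work: check for adapter errors and, on Lancer HW
 * errors, detach the netdev and try to recover the function. The work keeps
 * rescheduling itself unless recovery fails with a fatal (non -EAGAIN) error.
 */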
4421 static void be_func_recovery_task(struct work_struct *work)
4422 {
4423 struct be_adapter *adapter =
4424 container_of(work, struct be_adapter, func_recovery_work.work);
4425 int status = 0;
4426
4427 be_detect_error(adapter);
4428
4429 if (adapter->hw_error && lancer_chip(adapter)) {
4431 rtnl_lock();
4432 netif_device_detach(adapter->netdev);
4433 rtnl_unlock();
4434
4435 status = lancer_recover_func(adapter);
4436 if (!status)
4437 netif_device_attach(adapter->netdev);
4438 }
4439
4440 /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4441 * no need to attempt further recovery.
4442 */
4443 if (!status || status == -EAGAIN)
4444 schedule_delayed_work(&adapter->func_recovery_work,
4445 msecs_to_jiffies(1000));
4446 }
4447
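/* Periodic housekeeping work: reap MCC completions while the interface is
 * down, otherwise fire stats requests, read the die temperature on the PF,
 * replenish RX queues that starved on memory allocation and update EQ delays.
 */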
4448 static void be_worker(struct work_struct *work)
4449 {
4450 struct be_adapter *adapter =
4451 container_of(work, struct be_adapter, work.work);
4452 struct be_rx_obj *rxo;
4453 int i;
4454
4455 /* when interrupts are not yet enabled, just reap any pending
4456 * mcc completions */
4457 if (!netif_running(adapter->netdev)) {
4458 local_bh_disable();
4459 be_process_mcc(adapter);
4460 local_bh_enable();
4461 goto reschedule;
4462 }
4463
4464 if (!adapter->stats_cmd_sent) {
4465 if (lancer_chip(adapter))
4466 lancer_cmd_get_pport_stats(adapter,
4467 &adapter->stats_cmd);
4468 else
4469 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4470 }
4471
4472 if (be_physfn(adapter) &&
4473 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4474 be_cmd_get_die_temperature(adapter);
4475
4476 for_all_rx_queues(adapter, rxo, i) {
4477 /* Replenish RX-queues starved due to memory
4478 * allocation failures.
4479 */
4480 if (rxo->rx_post_starved)
4481 be_post_rx_frags(rxo, GFP_KERNEL);
4482 }
4483
4484 be_eqd_update(adapter);
4485
4486 reschedule:
4487 adapter->work_counter++;
4488 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4489 }
4490
4491 /* If any VFs are already enabled don't FLR the PF */
4492 static bool be_reset_required(struct be_adapter *adapter)
4493 {
4494 return !pci_num_vf(adapter->pdev);
4495 }
4496
4497 static char *mc_name(struct be_adapter *adapter)
4498 {
4499 char *str = ""; /* default */
4500
4501 switch (adapter->mc_type) {
4502 case UMC:
4503 str = "UMC";
4504 break;
4505 case FLEX10:
4506 str = "FLEX10";
4507 break;
4508 case vNIC1:
4509 str = "vNIC-1";
4510 break;
4511 case nPAR:
4512 str = "nPAR";
4513 break;
4514 case UFP:
4515 str = "UFP";
4516 break;
4517 case vNIC2:
4518 str = "vNIC-2";
4519 break;
4520 default:
4521 str = "";
4522 }
4523
4524 return str;
4525 }
4526
4527 static inline char *func_name(struct be_adapter *adapter)
4528 {
4529 return be_physfn(adapter) ? "PF" : "VF";
4530 }
4531
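/* PCI probe: enable the device, allocate the netdev, set the DMA mask, map
 * resources, sync up with the firmware, create queues and interfaces via
 * be_setup() and finally register the netdev and the RoCE device.
 */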
4532 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4533 {
4534 int status = 0;
4535 struct be_adapter *adapter;
4536 struct net_device *netdev;
4537 char port_name;
4538
4539 status = pci_enable_device(pdev);
4540 if (status)
4541 goto do_none;
4542
4543 status = pci_request_regions(pdev, DRV_NAME);
4544 if (status)
4545 goto disable_dev;
4546 pci_set_master(pdev);
4547
4548 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4549 if (netdev == NULL) {
4550 status = -ENOMEM;
4551 goto rel_reg;
4552 }
4553 adapter = netdev_priv(netdev);
4554 adapter->pdev = pdev;
4555 pci_set_drvdata(pdev, adapter);
4556 adapter->netdev = netdev;
4557 SET_NETDEV_DEV(netdev, &pdev->dev);
4558
4559 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4560 if (!status) {
4561 netdev->features |= NETIF_F_HIGHDMA;
4562 } else {
4563 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4564 if (status) {
4565 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4566 goto free_netdev;
4567 }
4568 }
4569
4570 if (be_physfn(adapter)) {
4571 status = pci_enable_pcie_error_reporting(pdev);
4572 if (!status)
4573 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4574 }
4575
4576 status = be_ctrl_init(adapter);
4577 if (status)
4578 goto free_netdev;
4579
4580 /* sync up with fw's ready state */
4581 if (be_physfn(adapter)) {
4582 status = be_fw_wait_ready(adapter);
4583 if (status)
4584 goto ctrl_clean;
4585 }
4586
4587 if (be_reset_required(adapter)) {
4588 status = be_cmd_reset_function(adapter);
4589 if (status)
4590 goto ctrl_clean;
4591
4592 /* Wait for interrupts to quiesce after an FLR */
4593 msleep(100);
4594 }
4595
4596 /* Allow interrupts for other ULPs running on NIC function */
4597 be_intr_set(adapter, true);
4598
4599 /* tell fw we're ready to fire cmds */
4600 status = be_cmd_fw_init(adapter);
4601 if (status)
4602 goto ctrl_clean;
4603
4604 status = be_stats_init(adapter);
4605 if (status)
4606 goto ctrl_clean;
4607
4608 status = be_get_initial_config(adapter);
4609 if (status)
4610 goto stats_clean;
4611
4612 INIT_DELAYED_WORK(&adapter->work, be_worker);
4613 INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4614 adapter->rx_fc = adapter->tx_fc = true;
4615
4616 status = be_setup(adapter);
4617 if (status)
4618 goto stats_clean;
4619
4620 be_netdev_init(netdev);
4621 status = register_netdev(netdev);
4622 if (status != 0)
4623 goto unsetup;
4624
4625 be_roce_dev_add(adapter);
4626
4627 schedule_delayed_work(&adapter->func_recovery_work,
4628 msecs_to_jiffies(1000));
4629
4630 be_cmd_query_port_name(adapter, &port_name);
4631
4632 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4633 func_name(adapter), mc_name(adapter), port_name);
4634
4635 return 0;
4636
4637 unsetup:
4638 be_clear(adapter);
4639 stats_clean:
4640 be_stats_cleanup(adapter);
4641 ctrl_clean:
4642 be_ctrl_cleanup(adapter);
4643 free_netdev:
4644 free_netdev(netdev);
4645 rel_reg:
4646 pci_release_regions(pdev);
4647 disable_dev:
4648 pci_disable_device(pdev);
4649 do_none:
4650 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4651 return status;
4652 }
4653
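/* PM suspend: arm wake-on-LAN if enabled, quiesce interrupts and the recovery
 * worker, close the interface, tear down resources and put the device into
 * the requested power state.
 */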
4654 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4655 {
4656 struct be_adapter *adapter = pci_get_drvdata(pdev);
4657 struct net_device *netdev = adapter->netdev;
4658
4659 if (adapter->wol_en)
4660 be_setup_wol(adapter, true);
4661
4662 be_intr_set(adapter, false);
4663 cancel_delayed_work_sync(&adapter->func_recovery_work);
4664
4665 netif_device_detach(netdev);
4666 if (netif_running(netdev)) {
4667 rtnl_lock();
4668 be_close(netdev);
4669 rtnl_unlock();
4670 }
4671 be_clear(adapter);
4672
4673 pci_save_state(pdev);
4674 pci_disable_device(pdev);
4675 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4676 return 0;
4677 }
4678
4679 static int be_resume(struct pci_dev *pdev)
4680 {
4681 int status = 0;
4682 struct be_adapter *adapter = pci_get_drvdata(pdev);
4683 struct net_device *netdev = adapter->netdev;
4684
4685 netif_device_detach(netdev);
4686
4687 status = pci_enable_device(pdev);
4688 if (status)
4689 return status;
4690
4691 pci_set_power_state(pdev, PCI_D0);
4692 pci_restore_state(pdev);
4693
4694 status = be_fw_wait_ready(adapter);
4695 if (status)
4696 return status;
4697
4698 be_intr_set(adapter, true);
4699 /* tell fw we're ready to fire cmds */
4700 status = be_cmd_fw_init(adapter);
4701 if (status)
4702 return status;
4703
4704 be_setup(adapter);
4705 if (netif_running(netdev)) {
4706 rtnl_lock();
4707 be_open(netdev);
4708 rtnl_unlock();
4709 }
4710
4711 schedule_delayed_work(&adapter->func_recovery_work,
4712 msecs_to_jiffies(1000));
4713 netif_device_attach(netdev);
4714
4715 if (adapter->wol_en)
4716 be_setup_wol(adapter, false);
4717
4718 return 0;
4719 }
4720
4721 /*
4722 * An FLR will stop BE from DMAing any data.
4723 */
4724 static void be_shutdown(struct pci_dev *pdev)
4725 {
4726 struct be_adapter *adapter = pci_get_drvdata(pdev);
4727
4728 if (!adapter)
4729 return;
4730
4731 cancel_delayed_work_sync(&adapter->work);
4732 cancel_delayed_work_sync(&adapter->func_recovery_work);
4733
4734 netif_device_detach(adapter->netdev);
4735
4736 be_cmd_reset_function(adapter);
4737
4738 pci_disable_device(pdev);
4739 }
4740
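/* EEH/AER error_detected handler: detach the netdev, tear down adapter
 * resources and request a slot reset (or a disconnect on a permanent
 * failure). Function 0 waits for a possible FW flash dump to complete.
 */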
4741 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4742 pci_channel_state_t state)
4743 {
4744 struct be_adapter *adapter = pci_get_drvdata(pdev);
4745 struct net_device *netdev = adapter->netdev;
4746
4747 dev_err(&adapter->pdev->dev, "EEH error detected\n");
4748
4749 if (!adapter->eeh_error) {
4750 adapter->eeh_error = true;
4751
4752 cancel_delayed_work_sync(&adapter->func_recovery_work);
4753
4754 rtnl_lock();
4755 netif_device_detach(netdev);
4756 if (netif_running(netdev))
4757 be_close(netdev);
4758 rtnl_unlock();
4759
4760 be_clear(adapter);
4761 }
4762
4763 if (state == pci_channel_io_perm_failure)
4764 return PCI_ERS_RESULT_DISCONNECT;
4765
4766 pci_disable_device(pdev);
4767
4768 /* The error could cause the FW to trigger a flash debug dump.
4769 * Resetting the card while flash dump is in progress
4770 * can cause it not to recover; wait for it to finish.
4771 * Wait only for first function as it is needed only once per
4772 * adapter.
4773 */
4774 if (pdev->devfn == 0)
4775 ssleep(30);
4776
4777 return PCI_ERS_RESULT_NEED_RESET;
4778 }
4779
4780 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4781 {
4782 struct be_adapter *adapter = pci_get_drvdata(pdev);
4783 int status;
4784
4785 dev_info(&adapter->pdev->dev, "EEH reset\n");
4786
4787 status = pci_enable_device(pdev);
4788 if (status)
4789 return PCI_ERS_RESULT_DISCONNECT;
4790
4791 pci_set_master(pdev);
4792 pci_set_power_state(pdev, PCI_D0);
4793 pci_restore_state(pdev);
4794
4795 /* Check if card is ok and fw is ready */
4796 dev_info(&adapter->pdev->dev,
4797 "Waiting for FW to be ready after EEH reset\n");
4798 status = be_fw_wait_ready(adapter);
4799 if (status)
4800 return PCI_ERS_RESULT_DISCONNECT;
4801
4802 pci_cleanup_aer_uncorrect_error_status(pdev);
4803 be_clear_all_error(adapter);
4804 return PCI_ERS_RESULT_RECOVERED;
4805 }
4806
4807 static void be_eeh_resume(struct pci_dev *pdev)
4808 {
4809 int status = 0;
4810 struct be_adapter *adapter = pci_get_drvdata(pdev);
4811 struct net_device *netdev = adapter->netdev;
4812
4813 dev_info(&adapter->pdev->dev, "EEH resume\n");
4814
4815 pci_save_state(pdev);
4816
4817 status = be_cmd_reset_function(adapter);
4818 if (status)
4819 goto err;
4820
4821 /* tell fw we're ready to fire cmds */
4822 status = be_cmd_fw_init(adapter);
4823 if (status)
4824 goto err;
4825
4826 status = be_setup(adapter);
4827 if (status)
4828 goto err;
4829
4830 if (netif_running(netdev)) {
4831 status = be_open(netdev);
4832 if (status)
4833 goto err;
4834 }
4835
4836 schedule_delayed_work(&adapter->func_recovery_work,
4837 msecs_to_jiffies(1000));
4838 netif_device_attach(netdev);
4839 return;
4840 err:
4841 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4842 }
4843
4844 static const struct pci_error_handlers be_eeh_handlers = {
4845 .error_detected = be_eeh_err_detected,
4846 .slot_reset = be_eeh_reset,
4847 .resume = be_eeh_resume,
4848 };
4849
4850 static struct pci_driver be_driver = {
4851 .name = DRV_NAME,
4852 .id_table = be_dev_ids,
4853 .probe = be_probe,
4854 .remove = be_remove,
4855 .suspend = be_suspend,
4856 .resume = be_resume,
4857 .shutdown = be_shutdown,
4858 .err_handler = &be_eeh_handlers
4859 };
4860
4861 static int __init be_init_module(void)
4862 {
4863 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4864 rx_frag_size != 2048) {
4865 pr_warn(DRV_NAME
4866 " : Module param rx_frag_size must be 2048/4096/8192."
4867 " Using 2048\n");
4868 rx_frag_size = 2048;
4869 }
4870
4871 return pci_register_driver(&be_driver);
4872 }
4873 module_init(be_init_module);
4874
4875 static void __exit be_exit_module(void)
4876 {
4877 pci_unregister_driver(&be_driver);
4878 }
4879 module_exit(be_exit_module);