/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/vxlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"

static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
                              DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40	0x1634
#define CHIP_NUM_57980S_10	0x1635
#define CHIP_NUM_57980S_MF	0x1636
#define CHIP_NUM_57980S_100	0x1644
#define CHIP_NUM_57980S_50	0x1654
#define CHIP_NUM_57980S_25	0x1656

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#endif

static const struct pci_device_id qede_pci_tbl[] = {
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
        { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT	(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
                                struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);

static struct pci_driver qede_pci_driver = {
        .name = "qede",
        .id_table = qede_pci_tbl,
        .probe = qede_probe,
        .remove = qede_remove,
};

static struct qed_eth_cb_ops qede_ll_ops = {
        {
                .link_update = qede_link_update,
        },
};

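/* The qed core driver invokes the callbacks in qede_ll_ops (currently only
 * link_update) once qede hands this table over via
 * edev->ops->register_ops() in __qede_probe() below.
 */
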
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
                             void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct ethtool_drvinfo drvinfo;
        struct qede_dev *edev;

        /* Currently only support name change */
        if (event != NETDEV_CHANGENAME)
                goto done;

        /* Check whether this is a qede device */
        if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
                goto done;

        memset(&drvinfo, 0, sizeof(drvinfo));
        ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
        if (strcmp(drvinfo.driver, "qede"))
                goto done;
        edev = netdev_priv(ndev);

        /* Notify qed of the name change */
        if (!edev->ops || !edev->ops->common)
                goto done;
        edev->ops->common->set_id(edev->cdev, edev->ndev->name,
                                  "qede");

done:
        return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
        .notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
        int ret;
        u32 qed_ver;

        pr_notice("qede_init: %s\n", version);

        qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
        if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
                pr_notice("Version mismatch [%08x != %08x]\n",
                          qed_ver,
                          QEDE_ETH_INTERFACE_VERSION);
                return -EINVAL;
        }

        qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
        if (!qed_ops) {
                pr_notice("Failed to get qed ethtool operations\n");
                return -EINVAL;
        }

        /* Must register notifier before pci ops, since we might miss
         * interface rename after pci probe and netdev registration.
         */
        ret = register_netdevice_notifier(&qede_netdev_notifier);
        if (ret) {
                pr_notice("Failed to register netdevice_notifier\n");
                qed_put_eth_ops();
                return -EINVAL;
        }

        ret = pci_register_driver(&qede_pci_driver);
        if (ret) {
                pr_notice("Failed to register driver\n");
                unregister_netdevice_notifier(&qede_netdev_notifier);
                qed_put_eth_ops();
                return -EINVAL;
        }

        return 0;
}

static void __exit qede_cleanup(void)
{
        pr_notice("qede_cleanup called\n");

        unregister_netdevice_notifier(&qede_netdev_notifier);
        pci_unregister_driver(&qede_pci_driver);
        qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */

/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
                            struct qede_tx_queue *txq,
                            int *len)
{
        u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
        struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
        struct eth_tx_1st_bd *first_bd;
        struct eth_tx_bd *tx_data_bd;
        int bds_consumed = 0;
        int nbds;
        bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
        int i, split_bd_len = 0;

        if (unlikely(!skb)) {
                DP_ERR(edev,
                       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
                       idx, txq->sw_tx_cons, txq->sw_tx_prod);
                return -1;
        }

        *len = skb->len;

        first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

        bds_consumed++;

        nbds = first_bd->data.nbds;

        if (data_split) {
                struct eth_tx_bd *split = (struct eth_tx_bd *)
                        qed_chain_consume(&txq->tx_pbl);
                split_bd_len = BD_UNMAP_LEN(split);
                bds_consumed++;
        }
        dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
                       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

        /* Unmap the data of the skb frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
                tx_data_bd = (struct eth_tx_bd *)
                        qed_chain_consume(&txq->tx_pbl);
                dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
        }

        while (bds_consumed++ < nbds)
                qed_chain_consume(&txq->tx_pbl);

        /* Free skb */
        dev_kfree_skb_any(skb);
        txq->sw_tx_ring[idx].skb = NULL;
        txq->sw_tx_ring[idx].flags = 0;

        return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
                                    struct qede_tx_queue *txq,
                                    struct eth_tx_1st_bd *first_bd,
                                    int nbd,
                                    bool data_split)
{
        u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
        struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
        struct eth_tx_bd *tx_data_bd;
        int i, split_bd_len = 0;

        /* Return prod to its position before this skb was handled */
        qed_chain_set_prod(&txq->tx_pbl,
                           le16_to_cpu(txq->tx_db.data.bd_prod),
                           first_bd);

        first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

        if (data_split) {
                struct eth_tx_bd *split = (struct eth_tx_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                split_bd_len = BD_UNMAP_LEN(split);
                nbd--;
        }

        dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
                       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

        /* Unmap the data of the skb frags */
        for (i = 0; i < nbd; i++) {
                tx_data_bd = (struct eth_tx_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                if (tx_data_bd->nbytes)
                        dma_unmap_page(&edev->pdev->dev,
                                       BD_UNMAP_ADDR(tx_data_bd),
                                       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
        }

        /* Return again prod to its position before this skb was handled */
        qed_chain_set_prod(&txq->tx_pbl,
                           le16_to_cpu(txq->tx_db.data.bd_prod),
                           first_bd);

        /* Free skb */
        dev_kfree_skb_any(skb);
        txq->sw_tx_ring[idx].skb = NULL;
        txq->sw_tx_ring[idx].flags = 0;
}

static u32 qede_xmit_type(struct qede_dev *edev,
                          struct sk_buff *skb,
                          int *ipv6_ext)
{
        u32 rc = XMIT_L4_CSUM;
        __be16 l3_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return XMIT_PLAIN;

        l3_proto = vlan_get_protocol(skb);
        if (l3_proto == htons(ETH_P_IPV6) &&
            (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
                *ipv6_ext = 1;

        if (skb_is_gso(skb))
                rc |= XMIT_LSO;

        return rc;
}
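
/* For example: an skb without checksum offload requested yields XMIT_PLAIN,
 * a checksummed TCP/UDP skb yields XMIT_L4_CSUM, and a GSO skb additionally
 * sets XMIT_LSO; *ipv6_ext flags IPv6 packets carrying extension headers.
 */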

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
                                         struct eth_tx_2nd_bd *second_bd,
                                         struct eth_tx_3rd_bd *third_bd)
{
        u8 l4_proto;
        u16 bd2_bits = 0, bd2_bits2 = 0;

        bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

        bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
                     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
                    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

        bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
                      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

        if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
                l4_proto = ipv6_hdr(skb)->nexthdr;
        else
                l4_proto = ip_hdr(skb)->protocol;

        if (l4_proto == IPPROTO_UDP)
                bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

        if (third_bd) {
                third_bd->data.bitfields |=
                        ((tcp_hdrlen(skb) / 4) &
                         ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
                        ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
        }

        second_bd->data.bitfields = cpu_to_le16(bd2_bits);
        second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_dev *edev,
                          skb_frag_t *frag,
                          struct eth_tx_bd *bd)
{
        dma_addr_t mapping;

        /* Map skb non-linear frag data for DMA */
        mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
                                   skb_frag_size(frag),
                                   DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
                return -ENOMEM;
        }

        /* Setup the data pointer of the frag data */
        BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

        return 0;
}

/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
                            struct net_device *ndev)
{
        struct qede_dev *edev = netdev_priv(ndev);
        struct netdev_queue *netdev_txq;
        struct qede_tx_queue *txq;
        struct eth_tx_1st_bd *first_bd;
        struct eth_tx_2nd_bd *second_bd = NULL;
        struct eth_tx_3rd_bd *third_bd = NULL;
        struct eth_tx_bd *tx_data_bd = NULL;
        u16 txq_index;
        u8 nbd = 0;
        dma_addr_t mapping;
        int rc, frag_idx = 0, ipv6_ext = 0;
        u8 xmit_type;
        u16 idx;
        u16 hlen;
        bool data_split = false; /* may be read on non-LSO error paths */

        /* Get tx-queue context and netdev index */
        txq_index = skb_get_queue_mapping(skb);
        WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
        txq = QEDE_TX_QUEUE(edev, txq_index);
        netdev_txq = netdev_get_tx_queue(ndev, txq_index);

        /* Current code doesn't support SKB linearization, since the max number
         * of skb frags can be passed in the FW HSI.
         */
        BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);

        WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
                (MAX_SKB_FRAGS + 1));

        xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

        /* Fill the entry in the SW ring and the BDs in the FW ring */
        idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
        txq->sw_tx_ring[idx].skb = skb;
        first_bd = (struct eth_tx_1st_bd *)
                   qed_chain_produce(&txq->tx_pbl);
        memset(first_bd, 0, sizeof(*first_bd));
        first_bd->data.bd_flags.bitfields =
                1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

        /* Map skb linear data for DMA and set in the first BD */
        mapping = dma_map_single(&edev->pdev->dev, skb->data,
                                 skb_headlen(skb), DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
                DP_NOTICE(edev, "SKB mapping failed\n");
                qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
                return NETDEV_TX_OK;
        }
        nbd++;
        BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

        /* In case there is IPv6 with extension headers or LSO we need 2nd and
         * 3rd BDs.
         */
        if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
                second_bd = (struct eth_tx_2nd_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                memset(second_bd, 0, sizeof(*second_bd));

                nbd++;
                third_bd = (struct eth_tx_3rd_bd *)
                        qed_chain_produce(&txq->tx_pbl);
                memset(third_bd, 0, sizeof(*third_bd));

                nbd++;
                /* We need to fill in additional data in second_bd... */
                tx_data_bd = (struct eth_tx_bd *)second_bd;
        }

        if (skb_vlan_tag_present(skb)) {
                first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
        }

        /* Fill the parsing flags & params according to the requested offload */
        if (xmit_type & XMIT_L4_CSUM) {
                /* We don't re-calculate IP checksum as it is already done by
                 * the upper stack
                 */
                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

                /* If the packet is IPv6 with extension header, indicate that
                 * to FW and pass few params, since the device cracker doesn't
                 * support parsing IPv6 with extension header/s.
                 */
                if (unlikely(ipv6_ext))
                        qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
        }

        if (xmit_type & XMIT_LSO) {
                first_bd->data.bd_flags.bitfields |=
                        (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
                third_bd->data.lso_mss =
                        cpu_to_le16(skb_shinfo(skb)->gso_size);

                first_bd->data.bd_flags.bitfields |=
                        1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
                hlen = skb_transport_header(skb) +
                       tcp_hdrlen(skb) - skb->data;

                /* @@@TBD - if will not be removed need to check */
                third_bd->data.bitfields |=
                        (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

                /* Make life easier for FW guys who can't deal with header and
                 * data on same BD. If we need to split, use the second bd...
                 */
                if (unlikely(skb_headlen(skb) > hlen)) {
                        DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                                   "TSO split header size is %d (%x:%x)\n",
                                   first_bd->nbytes, first_bd->addr.hi,
                                   first_bd->addr.lo);

                        mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
                                           le32_to_cpu(first_bd->addr.lo)) +
                                           hlen;

                        BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
                                              le16_to_cpu(first_bd->nbytes) -
                                              hlen);

                        /* this marks the BD as one that has no
                         * individual mapping
                         */
                        txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

                        first_bd->nbytes = cpu_to_le16(hlen);

                        tx_data_bd = (struct eth_tx_bd *)third_bd;
                        data_split = true;
                }
        }

        /* Handle fragmented skb */
        /* special handle for frags inside 2nd and 3rd bds.. */
        while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
                rc = map_frag_to_bd(edev,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
                        qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
                                                data_split);
                        return NETDEV_TX_OK;
                }

                if (tx_data_bd == (struct eth_tx_bd *)second_bd)
                        tx_data_bd = (struct eth_tx_bd *)third_bd;
                else
                        tx_data_bd = NULL;

                frag_idx++;
        }

        /* map last frags into 4th, 5th .... */
        for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
                tx_data_bd = (struct eth_tx_bd *)
                        qed_chain_produce(&txq->tx_pbl);

                memset(tx_data_bd, 0, sizeof(*tx_data_bd));

                rc = map_frag_to_bd(edev,
                                    &skb_shinfo(skb)->frags[frag_idx],
                                    tx_data_bd);
                if (rc) {
                        qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
                                                data_split);
                        return NETDEV_TX_OK;
                }
        }

        /* update the first BD with the actual num BDs */
        first_bd->data.nbds = nbd;

        netdev_tx_sent_queue(netdev_txq, skb->len);

        skb_tx_timestamp(skb);

        /* Advance packet producer only before sending the packet since mapping
         * of pages may fail.
         */
        txq->sw_tx_prod++;

        /* 'next page' entries are counted in the producer value */
        txq->tx_db.data.bd_prod =
                cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

        /* wmb makes sure that the BDs data is updated before updating the
         * producer, otherwise FW may read old data from the BDs.
         */
        wmb();
        barrier();
        writel(txq->tx_db.raw, txq->doorbell_addr);

        /* mmiowb is needed to synchronize doorbell writes from more than one
         * processor. It guarantees that the write arrives to the device before
         * the queue lock is released and another start_xmit is called (possibly
         * on another CPU). Without this barrier, the next doorbell can bypass
         * this doorbell. This is applicable to IA64/Altix systems.
         */
        mmiowb();

        if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
                      < (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(netdev_txq);
                DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                           "Stop queue was called\n");
                /* paired memory barrier is in qede_tx_int(), we have to keep
                 * ordering of set_bit() in netif_tx_stop_queue() and read of
                 * fp->bd_tx_cons
                 */
                smp_mb();

                if (qed_chain_get_elem_left(&txq->tx_pbl)
                     >= (MAX_SKB_FRAGS + 1) &&
                    (edev->state == QEDE_STATE_OPEN)) {
                        netif_tx_wake_queue(netdev_txq);
                        DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
                                   "Wake queue was called\n");
                }
        }

        return NETDEV_TX_OK;
}

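/* To summarize the descriptor layout built above: every packet starts with a
 * 1st BD carrying flags and the linear data; LSO or IPv6-extension packets
 * also consume a 2nd and 3rd BD for parsing parameters (and, on a TSO header
 * split, the header/payload boundary); any remaining frags each take one more
 * data BD, and first_bd->data.nbds records the total.
 */
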
static int qede_txq_has_work(struct qede_tx_queue *txq)
{
        u16 hw_bd_cons;

        /* Tell compiler that consumer and producer can change */
        barrier();
        hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
        if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
                return 0;

        return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev,
                       struct qede_tx_queue *txq)
{
        struct netdev_queue *netdev_txq;
        u16 hw_bd_cons;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        int rc;

        netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

        hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
        barrier();

        while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
                int len = 0;

                rc = qede_free_tx_pkt(edev, txq, &len);
                if (rc) {
                        DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
                                  hw_bd_cons,
                                  qed_chain_get_cons_idx(&txq->tx_pbl));
                        break;
                }

                bytes_compl += len;
                pkts_compl++;
                txq->sw_tx_cons++;
        }

        netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped(). Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         * On the other hand we need an rmb() here to ensure the proper
         * ordering of bit testing in the following
         * netif_tx_queue_stopped(txq) call.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
                /* Taking tx_lock is needed to prevent reenabling the queue
                 * while it's empty. This could happen if rx_action() gets
                 * suspended in qede_tx_int() after the condition before
                 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(netdev_txq, smp_processor_id());

                if ((netif_tx_queue_stopped(netdev_txq)) &&
                    (edev->state == QEDE_STATE_OPEN) &&
                    (qed_chain_get_elem_left(&txq->tx_pbl)
                      >= (MAX_SKB_FRAGS + 1))) {
                        netif_tx_wake_queue(netdev_txq);
                        DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
                                   "Wake queue was called\n");
                }

                __netif_tx_unlock(netdev_txq);
        }

        return 0;
}

static bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
        u16 hw_comp_cons, sw_comp_cons;

        /* Tell compiler that status block fields can change */
        barrier();

        hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

        return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
        u8 tc;

        for (tc = 0; tc < fp->edev->num_tc; tc++)
                if (qede_txq_has_work(&fp->txqs[tc]))
                        return true;
        return false;
}

/* This function copies the Rx buffer from the CONS position to the PROD
 * position, since we failed to allocate a new Rx buffer.
 */
static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
{
        struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
        struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
        struct sw_rx_data *sw_rx_data_cons =
                &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
        struct sw_rx_data *sw_rx_data_prod =
                &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

        dma_unmap_addr_set(sw_rx_data_prod, mapping,
                           dma_unmap_addr(sw_rx_data_cons, mapping));

        sw_rx_data_prod->data = sw_rx_data_cons->data;
        memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

        rxq->sw_rx_cons++;
        rxq->sw_rx_prod++;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
                                       struct qede_rx_queue *rxq)
{
        u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
        u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
        struct eth_rx_prod_data rx_prods = {0};

        /* Update producers */
        rx_prods.bd_prod = cpu_to_le16(bd_prod);
        rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         */
        wmb();

        internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
                        (u32 *)&rx_prods);

        /* mmiowb is needed to synchronize doorbell writes from more than one
         * processor. It guarantees that the write arrives to the device before
         * the napi lock is released and another qede_poll is called (possibly
         * on another CPU). Without this barrier, the next doorbell can bypass
         * this doorbell. This is applicable to IA64/Altix systems.
         */
        mmiowb();
}

static u32 qede_get_rxhash(struct qede_dev *edev,
                           u8 bitfields,
                           __le32 rss_hash,
                           enum pkt_hash_types *rxhash_type)
{
        enum rss_hash_type htype;

        htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

        if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
                *rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
                                (htype == RSS_HASH_TYPE_IPV6)) ?
                                PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
                return le32_to_cpu(rss_hash);
        }
        *rxhash_type = PKT_HASH_TYPE_NONE;
        return 0;
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
        skb_checksum_none_assert(skb);

        if (csum_flag & QEDE_CSUM_UNNECESSARY)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void qede_skb_receive(struct qede_dev *edev,
                                    struct qede_fastpath *fp,
                                    struct sk_buff *skb,
                                    u16 vlan_tag)
{
        if (vlan_tag)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tag);

        napi_gro_receive(&fp->napi, skb);
}

static u8 qede_check_csum(u16 flag)
{
        u16 csum_flag = 0;
        u8 csum = 0;

        if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
                csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
                csum = QEDE_CSUM_UNNECESSARY;
        }

        csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
                     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

        if (csum_flag & flag)
                return QEDE_CSUM_ERROR;

        return csum;
}

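/* In short: qede_check_csum() returns QEDE_CSUM_ERROR when the CQE flags an
 * IP-header error (or an L4-checksum error on a packet whose L4 checksum the
 * HW did calculate), QEDE_CSUM_UNNECESSARY when the HW validated the L4
 * checksum, and 0 when no checksum was calculated at all.
 */
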
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
        struct qede_dev *edev = fp->edev;
        struct qede_rx_queue *rxq = fp->rxq;

        u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
        int rx_pkt = 0;
        u8 csum_flag;

        hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

        /* Memory barrier to prevent the CPU from doing speculative reads of CQE
         * / BD in the while-loop before reading hw_comp_cons. If the CQE is
         * read before it is written by FW, then FW writes CQE and SB, and then
         * the CPU reads the hw_comp_cons, it will use an old CQE.
         */
        rmb();

        /* Loop to complete all indicated BDs */
        while (sw_comp_cons != hw_comp_cons) {
                struct eth_fast_path_rx_reg_cqe *fp_cqe;
                enum pkt_hash_types rxhash_type;
                enum eth_rx_cqe_type cqe_type;
                struct sw_rx_data *sw_rx_data;
                union eth_rx_cqe *cqe;
                struct sk_buff *skb;
                u16 len, pad;
                u32 rx_hash;
                u8 *data;

                /* Get the CQE from the completion ring */
                cqe = (union eth_rx_cqe *)
                        qed_chain_consume(&rxq->rx_comp_ring);
                cqe_type = cqe->fast_path_regular.type;

                if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
                        edev->ops->eth_cqe_completion(
                                        edev->cdev, fp->rss_id,
                                        (struct eth_slow_path_rx_cqe *)cqe);
                        goto next_cqe;
                }

                /* Get the data from the SW ring */
                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
                sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
                data = sw_rx_data->data;

                fp_cqe = &cqe->fast_path_regular;
                len = le16_to_cpu(fp_cqe->pkt_len);
                pad = fp_cqe->placement_offset;

                /* For every Rx BD consumed, we allocate a new BD so the BD ring
                 * is always with a fixed size. If allocation fails, we take the
                 * consumed BD and return it to the ring in the PROD position.
                 * The packet that was received on that BD will be dropped (and
                 * not passed to the upper stack).
                 */
                if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
                        dma_unmap_single(&edev->pdev->dev,
                                         dma_unmap_addr(sw_rx_data, mapping),
                                         rxq->rx_buf_size, DMA_FROM_DEVICE);

                        /* If this is an error packet then drop it */
                        parse_flag =
                        le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
                        csum_flag = qede_check_csum(parse_flag);
                        if (csum_flag == QEDE_CSUM_ERROR) {
                                DP_NOTICE(edev,
                                          "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
                                          sw_comp_cons, parse_flag);
                                rxq->rx_hw_errors++;
                                kfree(data);
                                goto next_rx;
                        }

                        skb = build_skb(data, 0);

                        if (unlikely(!skb)) {
                                DP_NOTICE(edev,
                                          "Build_skb failed, dropping incoming packet\n");
                                kfree(data);
                                rxq->rx_alloc_errors++;
                                goto next_rx;
                        }

                        skb_reserve(skb, pad);

                } else {
                        DP_NOTICE(edev,
                                  "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
                        qede_reuse_rx_data(rxq);
                        rxq->rx_alloc_errors++;
                        goto next_cqe;
                }

                sw_rx_data->data = NULL;

                skb_put(skb, len);

                skb->protocol = eth_type_trans(skb, edev->ndev);

                rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
                                          fp_cqe->rss_hash,
                                          &rxhash_type);

                skb_set_hash(skb, rx_hash, rxhash_type);

                qede_set_skb_csum(skb, csum_flag);

                skb_record_rx_queue(skb, fp->rss_id);

                qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));

                qed_chain_consume(&rxq->rx_bd_ring);

next_rx:
                rxq->sw_rx_cons++;
                rx_pkt++;

next_cqe: /* don't consume bd rx buffer */
                qed_chain_recycle_consumed(&rxq->rx_comp_ring);
                sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
                /* CR TPA - revisit how to handle budget in TPA perhaps
                 * increase on "end"
                 */
                if (rx_pkt == budget)
                        break;
        } /* repeat while sw_comp_cons != hw_comp_cons... */

        /* Update producers */
        qede_update_rx_prod(edev, rxq);

        return rx_pkt;
}

static int qede_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;
        struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
                                                napi);
        struct qede_dev *edev = fp->edev;

        while (1) {
                u8 tc;

                for (tc = 0; tc < edev->num_tc; tc++)
                        if (qede_txq_has_work(&fp->txqs[tc]))
                                qede_tx_int(edev, &fp->txqs[tc]);

                if (qede_has_rx_work(fp->rxq)) {
                        work_done += qede_rx_int(fp, budget - work_done);

                        /* must not complete if we consumed full budget */
                        if (work_done >= budget)
                                break;
                }

                /* Fall out from the NAPI loop if needed */
                if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
                        qed_sb_update_sb_idx(fp->sb_info);
                        /* *_has_*_work() reads the status block,
                         * thus we need to ensure that status block indices
                         * have been actually read (qed_sb_update_sb_idx)
                         * prior to this check (*_has_*_work) so that
                         * we won't write the "newer" value of the status block
                         * to HW (if there was a DMA right after
                         * qede_has_rx_work and if there is no rmb, the memory
                         * reading (qed_sb_update_sb_idx) may be postponed
                         * to right before *_ack_sb). In this case there
                         * will never be another interrupt until there is
                         * another update of the status block, while there
                         * is still unhandled work.
                         */
                        rmb();

                        if (!(qede_has_rx_work(fp->rxq) ||
                              qede_has_tx_work(fp))) {
                                napi_complete(napi);
                                /* Update and reenable interrupts */
                                qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
                                           1 /*update*/);
                                break;
                        }
                }
        }

        return work_done;
}

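/* The poll loop above drains TX completions for every traffic class, then RX
 * up to the NAPI budget; it only calls napi_complete() after re-reading the
 * status block and re-checking for work, so a completion that lands between
 * the check and the interrupt ack cannot be lost.
 */
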
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
        struct qede_fastpath *fp = fp_cookie;

        qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

        napi_schedule_irqoff(&fp->napi);
        return IRQ_HANDLED;
}

/* -------------------------------------------------------------------------
 * END OF FAST-PATH
 * -------------------------------------------------------------------------
 */

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
static int qede_set_mac_addr(struct net_device *ndev, void *p);
static void qede_set_rx_mode(struct net_device *ndev);
static void qede_config_rx_mode(struct net_device *ndev);

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
                                 enum qed_filter_xcast_params_type opcode,
                                 unsigned char mac[ETH_ALEN])
{
        struct qed_filter_params filter_cmd;

        memset(&filter_cmd, 0, sizeof(filter_cmd));
        filter_cmd.type = QED_FILTER_TYPE_UCAST;
        filter_cmd.filter.ucast.type = opcode;
        filter_cmd.filter.ucast.mac_valid = 1;
        ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

        return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
        struct qed_eth_stats stats;

        edev->ops->get_vport_stats(edev->cdev, &stats);
        edev->stats.no_buff_discards = stats.no_buff_discards;
        edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
        edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
        edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
        edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
        edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
        edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
        edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
        edev->stats.mac_filter_discards = stats.mac_filter_discards;

        edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
        edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
        edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
        edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
        edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
        edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
        edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
        edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
        edev->stats.coalesced_events = stats.tpa_coalesced_events;
        edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
        edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
        edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;

        edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
        edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
        edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
        edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
        edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
        edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
        edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
        edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
        edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
        edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
        edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
        edev->stats.rx_crc_errors = stats.rx_crc_errors;
        edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
        edev->stats.rx_pause_frames = stats.rx_pause_frames;
        edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
        edev->stats.rx_align_errors = stats.rx_align_errors;
        edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
        edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
        edev->stats.rx_jabbers = stats.rx_jabbers;
        edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
        edev->stats.rx_fragments = stats.rx_fragments;
        edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
        edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
        edev->stats.tx_128_to_255_byte_packets =
                                stats.tx_128_to_255_byte_packets;
        edev->stats.tx_256_to_511_byte_packets =
                                stats.tx_256_to_511_byte_packets;
        edev->stats.tx_512_to_1023_byte_packets =
                                stats.tx_512_to_1023_byte_packets;
        edev->stats.tx_1024_to_1518_byte_packets =
                                stats.tx_1024_to_1518_byte_packets;
        edev->stats.tx_1519_to_2047_byte_packets =
                                stats.tx_1519_to_2047_byte_packets;
        edev->stats.tx_2048_to_4095_byte_packets =
                                stats.tx_2048_to_4095_byte_packets;
        edev->stats.tx_4096_to_9216_byte_packets =
                                stats.tx_4096_to_9216_byte_packets;
        edev->stats.tx_9217_to_16383_byte_packets =
                                stats.tx_9217_to_16383_byte_packets;
        edev->stats.tx_pause_frames = stats.tx_pause_frames;
        edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
        edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
        edev->stats.tx_total_collisions = stats.tx_total_collisions;
        edev->stats.brb_truncates = stats.brb_truncates;
        edev->stats.brb_discards = stats.brb_discards;
        edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}

static struct rtnl_link_stats64 *qede_get_stats64(
                            struct net_device *dev,
                            struct rtnl_link_stats64 *stats)
{
        struct qede_dev *edev = netdev_priv(dev);

        qede_fill_by_demand_stats(edev);

        stats->rx_packets = edev->stats.rx_ucast_pkts +
                            edev->stats.rx_mcast_pkts +
                            edev->stats.rx_bcast_pkts;
        stats->tx_packets = edev->stats.tx_ucast_pkts +
                            edev->stats.tx_mcast_pkts +
                            edev->stats.tx_bcast_pkts;

        stats->rx_bytes = edev->stats.rx_ucast_bytes +
                          edev->stats.rx_mcast_bytes +
                          edev->stats.rx_bcast_bytes;

        stats->tx_bytes = edev->stats.tx_ucast_bytes +
                          edev->stats.tx_mcast_bytes +
                          edev->stats.tx_bcast_bytes;

        stats->tx_errors = edev->stats.tx_err_drop_pkts;
        stats->multicast = edev->stats.rx_mcast_pkts +
                           edev->stats.rx_bcast_pkts;

        stats->rx_fifo_errors = edev->stats.no_buff_discards;

        stats->collisions = edev->stats.tx_total_collisions;
        stats->rx_crc_errors = edev->stats.rx_crc_errors;
        stats->rx_frame_errors = edev->stats.rx_align_errors;

        return stats;
}

static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
        .ndo_start_xmit = qede_start_xmit,
        .ndo_set_rx_mode = qede_set_rx_mode,
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
        .ndo_get_stats64 = qede_get_stats64,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
                                            struct pci_dev *pdev,
                                            struct qed_dev_eth_info *info,
                                            u32 dp_module,
                                            u8 dp_level)
{
        struct net_device *ndev;
        struct qede_dev *edev;

        ndev = alloc_etherdev_mqs(sizeof(*edev),
                                  info->num_queues,
                                  info->num_queues);
        if (!ndev) {
                pr_err("etherdev allocation failed\n");
                return NULL;
        }

        edev = netdev_priv(ndev);
        edev->ndev = ndev;
        edev->cdev = cdev;
        edev->pdev = pdev;
        edev->dp_module = dp_module;
        edev->dp_level = dp_level;
        edev->ops = qed_ops;
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

        DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");

        SET_NETDEV_DEV(ndev, &pdev->dev);

        memset(&edev->stats, 0, sizeof(edev->stats));
        memcpy(&edev->dev_info, info, sizeof(*info));

        edev->num_tc = edev->dev_info.num_tc;

        return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
        struct net_device *ndev = edev->ndev;
        struct pci_dev *pdev = edev->pdev;
        u32 hw_features;

        pci_set_drvdata(pdev, ndev);

        ndev->mem_start = edev->dev_info.common.pci_mem_start;
        ndev->base_addr = ndev->mem_start;
        ndev->mem_end = edev->dev_info.common.pci_mem_end;
        ndev->irq = edev->dev_info.common.pci_irq;

        ndev->watchdog_timeo = TX_TIMEOUT;

        ndev->netdev_ops = &qede_netdev_ops;

        qede_set_ethtool_ops(ndev);

        /* user-changeable features */
        hw_features = NETIF_F_GRO | NETIF_F_SG |
                      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                      NETIF_F_TSO | NETIF_F_TSO6;

        ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                              NETIF_F_HIGHDMA;
        ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
                         NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
                         NETIF_F_HW_VLAN_CTAG_TX;

        ndev->hw_features = hw_features;

        /* Set network device HW mac */
        ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
        *p_dp_level = QED_LEVEL_NOTICE;
        *p_dp_module = 0;

        if (debug & QED_LOG_VERBOSE_MASK) {
                *p_dp_level = QED_LEVEL_VERBOSE;
                *p_dp_module = (debug & 0x3FFFFFFF);
        } else if (debug & QED_LOG_INFO_MASK) {
                *p_dp_level = QED_LEVEL_INFO;
        } else if (debug & QED_LOG_NOTICE_MASK) {
                *p_dp_level = QED_LEVEL_NOTICE;
        }
}

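/* As an illustration of the encoding described above (assuming the QED_LOG_*
 * masks map onto b31/b30/b29-b0 exactly as documented): loading the driver
 * with "modprobe qede debug=0x40000000" selects INFO level with no verbose
 * modules, while e.g. "debug=0x3" selects VERBOSE level for the two modules
 * mapped to bits b0 and b1.
 */
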
static void qede_free_fp_array(struct qede_dev *edev)
{
        if (edev->fp_array) {
                struct qede_fastpath *fp;
                int i;

                for_each_rss(i) {
                        fp = &edev->fp_array[i];

                        kfree(fp->sb_info);
                        kfree(fp->rxq);
                        kfree(fp->txqs);
                }
                kfree(edev->fp_array);
        }
        edev->num_rss = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
        struct qede_fastpath *fp;
        int i;

        edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
                                 sizeof(*edev->fp_array), GFP_KERNEL);
        if (!edev->fp_array) {
                DP_NOTICE(edev, "fp array allocation failed\n");
                goto err;
        }

        for_each_rss(i) {
                fp = &edev->fp_array[i];

                fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
                if (!fp->sb_info) {
                        DP_NOTICE(edev, "sb info struct allocation failed\n");
                        goto err;
                }

                fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
                if (!fp->rxq) {
                        DP_NOTICE(edev, "RXQ struct allocation failed\n");
                        goto err;
                }

                fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
                if (!fp->txqs) {
                        DP_NOTICE(edev, "TXQ array allocation failed\n");
                        goto err;
                }
        }

        return 0;
err:
        qede_free_fp_array(edev);
        return -ENOMEM;
}

static void qede_sp_task(struct work_struct *work)
{
        struct qede_dev *edev = container_of(work, struct qede_dev,
                                             sp_task.work);
        mutex_lock(&edev->qede_lock);

        if (edev->state == QEDE_STATE_OPEN) {
                if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
                        qede_config_rx_mode(edev->ndev);
        }

        mutex_unlock(&edev->qede_lock);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
        struct qed_pf_params pf_params;

        /* 16 rx + 16 tx */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
        pf_params.eth_pf_params.num_cons = 32;
        qed_ops->common->update_pf_params(cdev, &pf_params);
}

enum qede_probe_mode {
        QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                        enum qede_probe_mode mode)
{
        struct qed_slowpath_params params;
        struct qed_dev_eth_info dev_info;
        struct qede_dev *edev;
        struct qed_dev *cdev;
        int rc;

        if (unlikely(dp_level & QED_LEVEL_INFO))
                pr_notice("Starting qede probe\n");

        cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
                                      dp_module, dp_level);
        if (!cdev) {
                rc = -ENODEV;
                goto err0;
        }

        qede_update_pf_params(cdev);

        /* Start the Slowpath-process */
        memset(&params, 0, sizeof(struct qed_slowpath_params));
        params.int_mode = QED_INT_MODE_MSIX;
        params.drv_major = QEDE_MAJOR_VERSION;
        params.drv_minor = QEDE_MINOR_VERSION;
        params.drv_rev = QEDE_REVISION_VERSION;
        params.drv_eng = QEDE_ENGINEERING_VERSION;
        strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
        rc = qed_ops->common->slowpath_start(cdev, &params);
        if (rc) {
                pr_notice("Cannot start slowpath\n");
                goto err1;
        }

        /* Learn information crucial for qede to progress */
        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto err2;

        edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
                                   dp_level);
        if (!edev) {
                rc = -ENOMEM;
                goto err2;
        }

        qede_init_ndev(edev);

        rc = register_netdev(edev->ndev);
        if (rc) {
                DP_NOTICE(edev, "Cannot register net-device\n");
                goto err3;
        }

        edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

        edev->ops->register_ops(cdev, &qede_ll_ops, edev);

        INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
        mutex_init(&edev->qede_lock);

        DP_INFO(edev, "Ending successfully qede probe\n");

        return 0;

err3:
        free_netdev(edev->ndev);
err2:
        qed_ops->common->slowpath_stop(cdev);
err1:
        qed_ops->common->remove(cdev);
err0:
        return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        u32 dp_module = 0;
        u8 dp_level = 0;

        qede_config_debug(debug, &dp_module, &dp_level);

        return __qede_probe(pdev, dp_module, dp_level,
                            QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
        QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
        struct net_device *ndev = pci_get_drvdata(pdev);
        struct qede_dev *edev = netdev_priv(ndev);
        struct qed_dev *cdev = edev->cdev;

        DP_INFO(edev, "Starting qede_remove\n");

        cancel_delayed_work_sync(&edev->sp_task);
        unregister_netdev(ndev);

        edev->ops->common->set_power_state(cdev, PCI_D0);

        pci_set_drvdata(pdev, NULL);

        free_netdev(ndev);

        /* Use global ops since we've freed edev */
        qed_ops->common->slowpath_stop(cdev);
        qed_ops->common->remove(cdev);

        pr_notice("Ending successfully qede_remove\n");
}

static void qede_remove(struct pci_dev *pdev)
{
        __qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
        int rc;
        u16 rss_num;

        /* Setup queues according to possible resources*/
        rss_num = netif_get_num_default_rss_queues() *
                  edev->dev_info.common.num_hwfns;

        rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

        rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
        if (rc > 0) {
                /* Managed to request interrupts for our queues */
                edev->num_rss = rc;
                DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
                        QEDE_RSS_CNT(edev), rss_num);
                rc = 0;
        }
        return rc;
}

1520 | ||
1521 | static void qede_free_mem_sb(struct qede_dev *edev, | |
1522 | struct qed_sb_info *sb_info) | |
1523 | { | |
1524 | if (sb_info->sb_virt) | |
1525 | dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt), | |
1526 | (void *)sb_info->sb_virt, sb_info->sb_phys); | |
1527 | } | |
1528 | ||
1529 | /* This function allocates fast-path status block memory */ | |
1530 | static int qede_alloc_mem_sb(struct qede_dev *edev, | |
1531 | struct qed_sb_info *sb_info, | |
1532 | u16 sb_id) | |
1533 | { | |
1534 | struct status_block *sb_virt; | |
1535 | dma_addr_t sb_phys; | |
1536 | int rc; | |
1537 | ||
1538 | sb_virt = dma_alloc_coherent(&edev->pdev->dev, | |
1539 | sizeof(*sb_virt), | |
1540 | &sb_phys, GFP_KERNEL); | |
1541 | if (!sb_virt) { | |
1542 | DP_ERR(edev, "Status block allocation failed\n"); | |
1543 | return -ENOMEM; | |
1544 | } | |
1545 | ||
1546 | rc = edev->ops->common->sb_init(edev->cdev, sb_info, | |
1547 | sb_virt, sb_phys, sb_id, | |
1548 | QED_SB_TYPE_L2_QUEUE); | |
1549 | if (rc) { | |
1550 | DP_ERR(edev, "Status block initialization failed\n"); | |
1551 | dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt), | |
1552 | sb_virt, sb_phys); | |
1553 | return rc; | |
1554 | } | |
1555 | ||
1556 | return 0; | |
1557 | } | |
1558 | ||
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		u8 *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_single(&edev->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 rxq->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}
1579 | ||
1580 | static void qede_free_mem_rxq(struct qede_dev *edev, | |
1581 | struct qede_rx_queue *rxq) | |
1582 | { | |
1583 | /* Free rx buffers */ | |
1584 | qede_free_rx_buffers(edev, rxq); | |
1585 | ||
1586 | /* Free the parallel SW ring */ | |
1587 | kfree(rxq->sw_rx_ring); | |
1588 | ||
1589 | /* Free the real RQ ring used by FW */ | |
1590 | edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring); | |
1591 | edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); | |
1592 | } | |
1593 | ||
1594 | static int qede_alloc_rx_buffer(struct qede_dev *edev, | |
1595 | struct qede_rx_queue *rxq) | |
1596 | { | |
1597 | struct sw_rx_data *sw_rx_data; | |
1598 | struct eth_rx_bd *rx_bd; | |
1599 | dma_addr_t mapping; | |
1600 | u16 rx_buf_size; | |
1601 | u8 *data; | |
1602 | ||
1603 | rx_buf_size = rxq->rx_buf_size; | |
1604 | ||
1605 | data = kmalloc(rx_buf_size, GFP_ATOMIC); | |
1606 | if (unlikely(!data)) { | |
1607 | DP_NOTICE(edev, "Failed to allocate Rx data\n"); | |
1608 | return -ENOMEM; | |
1609 | } | |
1610 | ||
1611 | mapping = dma_map_single(&edev->pdev->dev, data, | |
1612 | rx_buf_size, DMA_FROM_DEVICE); | |
1613 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { | |
1614 | kfree(data); | |
1615 | DP_NOTICE(edev, "Failed to map Rx buffer\n"); | |
1616 | return -ENOMEM; | |
1617 | } | |
1618 | ||
1619 | sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; | |
1620 | sw_rx_data->data = data; | |
1621 | ||
1622 | dma_unmap_addr_set(sw_rx_data, mapping, mapping); | |
1623 | ||
1624 | /* Advance PROD and get BD pointer */ | |
1625 | rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); | |
1626 | WARN_ON(!rx_bd); | |
1627 | rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); | |
1628 | rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping)); | |
1629 | ||
1630 | rxq->sw_rx_prod++; | |
1631 | ||
1632 | return 0; | |
1633 | } | |
1634 | ||
1635 | /* This function allocates all memory needed per Rx queue */ | |
1636 | static int qede_alloc_mem_rxq(struct qede_dev *edev, | |
1637 | struct qede_rx_queue *rxq) | |
1638 | { | |
1639 | int i, rc, size, num_allocated; | |
1640 | ||
1641 | rxq->num_rx_buffers = edev->q_num_rx_buffers; | |
1642 | ||
1643 | rxq->rx_buf_size = NET_IP_ALIGN + | |
1644 | ETH_OVERHEAD + | |
1645 | edev->ndev->mtu + | |
1646 | QEDE_FW_RX_ALIGN_END; | |
1647 | ||
1648 | /* Allocate the parallel driver ring for Rx buffers */ | |
1649 | size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX; | |
1650 | rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); | |
1651 | if (!rxq->sw_rx_ring) { | |
1652 | DP_ERR(edev, "Rx buffers ring allocation failed\n"); | |
1653 | goto err; | |
1654 | } | |
1655 | ||
1656 | /* Allocate FW Rx ring */ | |
1657 | rc = edev->ops->common->chain_alloc(edev->cdev, | |
1658 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | |
1659 | QED_CHAIN_MODE_NEXT_PTR, | |
1660 | NUM_RX_BDS_MAX, | |
1661 | sizeof(struct eth_rx_bd), | |
1662 | &rxq->rx_bd_ring); | |
1663 | ||
1664 | if (rc) | |
1665 | goto err; | |
1666 | ||
1667 | /* Allocate FW completion ring */ | |
1668 | rc = edev->ops->common->chain_alloc(edev->cdev, | |
1669 | QED_CHAIN_USE_TO_CONSUME, | |
1670 | QED_CHAIN_MODE_PBL, | |
1671 | NUM_RX_BDS_MAX, | |
1672 | sizeof(union eth_rx_cqe), | |
1673 | &rxq->rx_comp_ring); | |
1674 | if (rc) | |
1675 | goto err; | |
1676 | ||
1677 | /* Allocate buffers for the Rx ring */ | |
1678 | for (i = 0; i < rxq->num_rx_buffers; i++) { | |
1679 | rc = qede_alloc_rx_buffer(edev, rxq); | |
1680 | if (rc) | |
1681 | break; | |
1682 | } | |
1683 | num_allocated = i; | |
1684 | if (!num_allocated) { | |
1685 | DP_ERR(edev, "Rx buffers allocation failed\n"); | |
1686 | goto err; | |
1687 | } else if (num_allocated < rxq->num_rx_buffers) { | |
1688 | DP_NOTICE(edev, | |
1689 | "Allocated less buffers than desired (%d allocated)\n", | |
1690 | num_allocated); | |
1691 | } | |
1692 | ||
1693 | return 0; | |
1694 | ||
1695 | err: | |
1696 | qede_free_mem_rxq(edev, rxq); | |
1697 | return -ENOMEM; | |
1698 | } | |
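
/* Note on the two chain modes used here: the Rx BD ring is allocated in
 * NEXT_PTR mode while the completion ring (and the Tx ring below) use
 * PBL mode; both layouts come from qed's chain_alloc(), presumably
 * matching what the firmware expects for each ring type.
 */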
1699 | ||
1700 | static void qede_free_mem_txq(struct qede_dev *edev, | |
1701 | struct qede_tx_queue *txq) | |
1702 | { | |
1703 | /* Free the parallel SW ring */ | |
1704 | kfree(txq->sw_tx_ring); | |
1705 | ||
1706 | /* Free the real RQ ring used by FW */ | |
1707 | edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl); | |
1708 | } | |
1709 | ||
1710 | /* This function allocates all memory needed per Tx queue */ | |
1711 | static int qede_alloc_mem_txq(struct qede_dev *edev, | |
1712 | struct qede_tx_queue *txq) | |
1713 | { | |
1714 | int size, rc; | |
1715 | union eth_tx_bd_types *p_virt; | |
1716 | ||
1717 | txq->num_tx_buffers = edev->q_num_tx_buffers; | |
1718 | ||
1719 | /* Allocate the parallel driver ring for Tx buffers */ | |
1720 | size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX; | |
1721 | txq->sw_tx_ring = kzalloc(size, GFP_KERNEL); | |
1722 | if (!txq->sw_tx_ring) { | |
1723 | DP_NOTICE(edev, "Tx buffers ring allocation failed\n"); | |
1724 | goto err; | |
1725 | } | |
1726 | ||
1727 | rc = edev->ops->common->chain_alloc(edev->cdev, | |
1728 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | |
1729 | QED_CHAIN_MODE_PBL, | |
1730 | NUM_TX_BDS_MAX, | |
1731 | sizeof(*p_virt), | |
1732 | &txq->tx_pbl); | |
1733 | if (rc) | |
1734 | goto err; | |
1735 | ||
1736 | return 0; | |
1737 | ||
1738 | err: | |
1739 | qede_free_mem_txq(edev, txq); | |
1740 | return -ENOMEM; | |
1741 | } | |
1742 | ||
1743 | /* This function frees all memory of a single fp */ | |
1744 | static void qede_free_mem_fp(struct qede_dev *edev, | |
1745 | struct qede_fastpath *fp) | |
1746 | { | |
1747 | int tc; | |
1748 | ||
1749 | qede_free_mem_sb(edev, fp->sb_info); | |
1750 | ||
1751 | qede_free_mem_rxq(edev, fp->rxq); | |
1752 | ||
1753 | for (tc = 0; tc < edev->num_tc; tc++) | |
1754 | qede_free_mem_txq(edev, &fp->txqs[tc]); | |
1755 | } | |
1756 | ||
1757 | /* This function allocates all memory needed for a single fp (i.e. an entity | |
1758 | * which contains status block, one rx queue and multiple per-TC tx queues. | |
1759 | */ | |
1760 | static int qede_alloc_mem_fp(struct qede_dev *edev, | |
1761 | struct qede_fastpath *fp) | |
1762 | { | |
1763 | int rc, tc; | |
1764 | ||
1765 | rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); | |
1766 | if (rc) | |
1767 | goto err; | |
1768 | ||
1769 | rc = qede_alloc_mem_rxq(edev, fp->rxq); | |
1770 | if (rc) | |
1771 | goto err; | |
1772 | ||
1773 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1774 | rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); | |
1775 | if (rc) | |
1776 | goto err; | |
1777 | } | |
1778 | ||
1779 | return 0; | |
1780 | ||
1781 | err: | |
1782 | qede_free_mem_fp(edev, fp); | |
1783 | return -ENOMEM; | |
1784 | } | |
1785 | ||
1786 | static void qede_free_mem_load(struct qede_dev *edev) | |
1787 | { | |
1788 | int i; | |
1789 | ||
1790 | for_each_rss(i) { | |
1791 | struct qede_fastpath *fp = &edev->fp_array[i]; | |
1792 | ||
1793 | qede_free_mem_fp(edev, fp); | |
1794 | } | |
1795 | } | |
1796 | ||
1797 | /* This function allocates all qede memory at NIC load. */ | |
1798 | static int qede_alloc_mem_load(struct qede_dev *edev) | |
1799 | { | |
1800 | int rc = 0, rss_id; | |
1801 | ||
1802 | for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { | |
1803 | struct qede_fastpath *fp = &edev->fp_array[rss_id]; | |
1804 | ||
1805 | rc = qede_alloc_mem_fp(edev, fp); | |
1806 | if (rc) | |
1807 | break; | |
1808 | } | |
1809 | ||
1810 | if (rss_id != QEDE_RSS_CNT(edev)) { | |
1811 | /* Failed allocating memory for all the queues */ | |
1812 | if (!rss_id) { | |
1813 | DP_ERR(edev, | |
1814 | "Failed to allocate memory for the leading queue\n"); | |
1815 | rc = -ENOMEM; | |
1816 | } else { | |
1817 | DP_NOTICE(edev, | |
1818 | "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n", | |
1819 | QEDE_RSS_CNT(edev), rss_id); | |
1820 | } | |
1821 | edev->num_rss = rss_id; | |
1822 | } | |
1823 | ||
1824 | return 0; | |
1825 | } | |
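
/* Example of the flat Tx queue indexing set up below (illustrative
 * numbers only): with QEDE_RSS_CNT() == 4 and num_tc == 2, fastpath 1
 * owns flat Tx queue ids 1 (tc 0) and 5 (tc 1), i.e.
 * txq_index = tc * QEDE_RSS_CNT(edev) + rss_id.
 */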
1826 | ||
1827 | /* This function inits fp content and resets the SB, RXQ and TXQ structures */ | |
1828 | static void qede_init_fp(struct qede_dev *edev) | |
1829 | { | |
1830 | int rss_id, txq_index, tc; | |
1831 | struct qede_fastpath *fp; | |
1832 | ||
1833 | for_each_rss(rss_id) { | |
1834 | fp = &edev->fp_array[rss_id]; | |
1835 | ||
1836 | fp->edev = edev; | |
1837 | fp->rss_id = rss_id; | |
1838 | ||
1839 | memset((void *)&fp->napi, 0, sizeof(fp->napi)); | |
1840 | ||
1841 | memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info)); | |
1842 | ||
1843 | memset((void *)fp->rxq, 0, sizeof(*fp->rxq)); | |
1844 | fp->rxq->rxq_id = rss_id; | |
1845 | ||
1846 | memset((void *)fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); | |
1847 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1848 | txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; | |
1849 | fp->txqs[tc].index = txq_index; | |
1850 | } | |
1851 | ||
1852 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | |
1853 | edev->ndev->name, rss_id); | |
1854 | } | |
1855 | } | |
1856 | ||
1857 | static int qede_set_real_num_queues(struct qede_dev *edev) | |
1858 | { | |
1859 | int rc = 0; | |
1860 | ||
1861 | rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); | |
1862 | if (rc) { | |
1863 | DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); | |
1864 | return rc; | |
1865 | } | |
1866 | rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); | |
1867 | if (rc) { | |
1868 | DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); | |
1869 | return rc; | |
1870 | } | |
1871 | ||
1872 | return 0; | |
1873 | } | |
1874 | ||
1875 | static void qede_napi_disable_remove(struct qede_dev *edev) | |
1876 | { | |
1877 | int i; | |
1878 | ||
1879 | for_each_rss(i) { | |
1880 | napi_disable(&edev->fp_array[i].napi); | |
1881 | ||
1882 | netif_napi_del(&edev->fp_array[i].napi); | |
1883 | } | |
1884 | } | |
1885 | ||
1886 | static void qede_napi_add_enable(struct qede_dev *edev) | |
1887 | { | |
1888 | int i; | |
1889 | ||
1890 | /* Add NAPI objects */ | |
1891 | for_each_rss(i) { | |
1892 | netif_napi_add(edev->ndev, &edev->fp_array[i].napi, | |
1893 | qede_poll, NAPI_POLL_WEIGHT); | |
1894 | napi_enable(&edev->fp_array[i].napi); | |
1895 | } | |
1896 | } | |
1897 | ||
1898 | static void qede_sync_free_irqs(struct qede_dev *edev) | |
1899 | { | |
1900 | int i; | |
1901 | ||
1902 | for (i = 0; i < edev->int_info.used_cnt; i++) { | |
1903 | if (edev->int_info.msix_cnt) { | |
1904 | synchronize_irq(edev->int_info.msix[i].vector); | |
1905 | free_irq(edev->int_info.msix[i].vector, | |
1906 | &edev->fp_array[i]); | |
1907 | } else { | |
1908 | edev->ops->common->simd_handler_clean(edev->cdev, i); | |
1909 | } | |
1910 | } | |
1911 | ||
1912 | edev->int_info.used_cnt = 0; | |
1913 | } | |
1914 | ||
1915 | static int qede_req_msix_irqs(struct qede_dev *edev) | |
1916 | { | |
1917 | int i, rc; | |
1918 | ||
1919 | /* Sanitize number of interrupts == number of prepared RSS queues */ | |
1920 | if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { | |
1921 | DP_ERR(edev, | |
1922 | "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", | |
1923 | QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); | |
1924 | return -EINVAL; | |
1925 | } | |
1926 | ||
1927 | for (i = 0; i < QEDE_RSS_CNT(edev); i++) { | |
1928 | rc = request_irq(edev->int_info.msix[i].vector, | |
1929 | qede_msix_fp_int, 0, edev->fp_array[i].name, | |
1930 | &edev->fp_array[i]); | |
1931 | if (rc) { | |
1932 | DP_ERR(edev, "Request fp %d irq failed\n", i); | |
1933 | qede_sync_free_irqs(edev); | |
1934 | return rc; | |
1935 | } | |
1936 | DP_VERBOSE(edev, NETIF_MSG_INTR, | |
1937 | "Requested fp irq for %s [entry %d]. Cookie is at %p\n", | |
1938 | edev->fp_array[i].name, i, | |
1939 | &edev->fp_array[i]); | |
1940 | edev->int_info.used_cnt++; | |
1941 | } | |
1942 | ||
1943 | return 0; | |
1944 | } | |
1945 | ||
1946 | static void qede_simd_fp_handler(void *cookie) | |
1947 | { | |
1948 | struct qede_fastpath *fp = (struct qede_fastpath *)cookie; | |
1949 | ||
1950 | napi_schedule_irqoff(&fp->napi); | |
1951 | } | |
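
/* Interrupt setup comes in two flavours: with MSI-X, every fastpath
 * gets its own vector via qede_req_msix_irqs(); without it, qed
 * multiplexes a single interrupt and invokes qede_simd_fp_handler()
 * for each registered index instead.
 */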
1952 | ||
1953 | static int qede_setup_irqs(struct qede_dev *edev) | |
1954 | { | |
1955 | int i, rc = 0; | |
1956 | ||
1957 | /* Learn Interrupt configuration */ | |
1958 | rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); | |
1959 | if (rc) | |
1960 | return rc; | |
1961 | ||
1962 | if (edev->int_info.msix_cnt) { | |
1963 | rc = qede_req_msix_irqs(edev); | |
1964 | if (rc) | |
1965 | return rc; | |
1966 | edev->ndev->irq = edev->int_info.msix[0].vector; | |
1967 | } else { | |
1968 | const struct qed_common_ops *ops; | |
1969 | ||
1970 | /* qed should learn receive the RSS ids and callbacks */ | |
1971 | ops = edev->ops->common; | |
1972 | for (i = 0; i < QEDE_RSS_CNT(edev); i++) | |
1973 | ops->simd_handler_config(edev->cdev, | |
1974 | &edev->fp_array[i], i, | |
1975 | qede_simd_fp_handler); | |
1976 | edev->int_info.used_cnt = QEDE_RSS_CNT(edev); | |
1977 | } | |
1978 | return 0; | |
1979 | } | |
1980 | ||
1981 | static int qede_drain_txq(struct qede_dev *edev, | |
1982 | struct qede_tx_queue *txq, | |
1983 | bool allow_drain) | |
1984 | { | |
1985 | int rc, cnt = 1000; | |
1986 | ||
1987 | while (txq->sw_tx_cons != txq->sw_tx_prod) { | |
1988 | if (!cnt) { | |
1989 | if (allow_drain) { | |
1990 | DP_NOTICE(edev, | |
1991 | "Tx queue[%d] is stuck, requesting MCP to drain\n", | |
1992 | txq->index); | |
1993 | rc = edev->ops->common->drain(edev->cdev); | |
1994 | if (rc) | |
1995 | return rc; | |
1996 | return qede_drain_txq(edev, txq, false); | |
1997 | } | |
1998 | DP_NOTICE(edev, | |
1999 | "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", | |
2000 | txq->index, txq->sw_tx_prod, | |
2001 | txq->sw_tx_cons); | |
2002 | return -ENODEV; | |
2003 | } | |
2004 | cnt--; | |
2005 | usleep_range(1000, 2000); | |
2006 | barrier(); | |
2007 | } | |
2008 | ||
2009 | /* FW finished processing, wait for HW to transmit all tx packets */ | |
2010 | usleep_range(1000, 2000); | |
2011 | ||
2012 | return 0; | |
2013 | } | |
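
/* Shutdown ordering below: deactivate the vport first so no new Rx/Tx
 * work arrives, drain the Tx queues (optionally asking the MCP for
 * help), then stop the individual HW queues in reverse creation order
 * and finally stop the vport itself.
 */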
2014 | ||
2015 | static int qede_stop_queues(struct qede_dev *edev) | |
2016 | { | |
2017 | struct qed_update_vport_params vport_update_params; | |
2018 | struct qed_dev *cdev = edev->cdev; | |
2019 | int rc, tc, i; | |
2020 | ||
2021 | /* Disable the vport */ | |
2022 | memset(&vport_update_params, 0, sizeof(vport_update_params)); | |
2023 | vport_update_params.vport_id = 0; | |
2024 | vport_update_params.update_vport_active_flg = 1; | |
2025 | vport_update_params.vport_active_flg = 0; | |
2026 | vport_update_params.update_rss_flg = 0; | |
2027 | ||
2028 | rc = edev->ops->vport_update(cdev, &vport_update_params); | |
2029 | if (rc) { | |
2030 | DP_ERR(edev, "Failed to update vport\n"); | |
2031 | return rc; | |
2032 | } | |
2033 | ||
2034 | /* Flush Tx queues. If needed, request drain from MCP */ | |
2035 | for_each_rss(i) { | |
2036 | struct qede_fastpath *fp = &edev->fp_array[i]; | |
2037 | ||
2038 | for (tc = 0; tc < edev->num_tc; tc++) { | |
2039 | struct qede_tx_queue *txq = &fp->txqs[tc]; | |
2040 | ||
2041 | rc = qede_drain_txq(edev, txq, true); | |
2042 | if (rc) | |
2043 | return rc; | |
2044 | } | |
2045 | } | |
2046 | ||
2047 | /* Stop all Queues in reverse order*/ | |
2048 | for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { | |
2049 | struct qed_stop_rxq_params rx_params; | |
2050 | ||
2051 | /* Stop the Tx Queue(s)*/ | |
2052 | for (tc = 0; tc < edev->num_tc; tc++) { | |
2053 | struct qed_stop_txq_params tx_params; | |
2054 | ||
2055 | tx_params.rss_id = i; | |
2056 | tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; | |
2057 | rc = edev->ops->q_tx_stop(cdev, &tx_params); | |
2058 | if (rc) { | |
2059 | DP_ERR(edev, "Failed to stop TXQ #%d\n", | |
2060 | tx_params.tx_queue_id); | |
2061 | return rc; | |
2062 | } | |
2063 | } | |
2064 | ||
2065 | /* Stop the Rx Queue*/ | |
2066 | memset(&rx_params, 0, sizeof(rx_params)); | |
2067 | rx_params.rss_id = i; | |
2068 | rx_params.rx_queue_id = i; | |
2069 | ||
2070 | rc = edev->ops->q_rx_stop(cdev, &rx_params); | |
2071 | if (rc) { | |
2072 | DP_ERR(edev, "Failed to stop RXQ #%d\n", i); | |
2073 | return rc; | |
2074 | } | |
2075 | } | |
2076 | ||
2077 | /* Stop the vport */ | |
2078 | rc = edev->ops->vport_stop(cdev, 0); | |
2079 | if (rc) | |
2080 | DP_ERR(edev, "Failed to stop VPORT\n"); | |
2081 | ||
2082 | return rc; | |
2083 | } | |
2084 | ||
2085 | static int qede_start_queues(struct qede_dev *edev) | |
2086 | { | |
2087 | int rc, tc, i; | |
2088 | int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1; | |
2089 | struct qed_dev *cdev = edev->cdev; | |
2090 | struct qed_update_vport_rss_params *rss_params = &edev->rss_params; | |
2091 | struct qed_update_vport_params vport_update_params; | |
2092 | struct qed_queue_start_common_params q_params; | |
2093 | ||
2094 | if (!edev->num_rss) { | |
2095 | DP_ERR(edev, | |
2096 | "Cannot update V-VPORT as active as there are no Rx queues\n"); | |
2097 | return -EINVAL; | |
2098 | } | |
2099 | ||
2100 | rc = edev->ops->vport_start(cdev, vport_id, | |
2101 | edev->ndev->mtu, | |
2102 | drop_ttl0_flg, | |
2103 | vlan_removal_en); | |
2104 | ||
2105 | if (rc) { | |
2106 | DP_ERR(edev, "Start V-PORT failed %d\n", rc); | |
2107 | return rc; | |
2108 | } | |
2109 | ||
2110 | DP_VERBOSE(edev, NETIF_MSG_IFUP, | |
2111 | "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", | |
2112 | vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); | |
2113 | ||
	for_each_rss(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table;

		memset(&q_params, 0, sizeof(q_params));
		q_params.rss_id = i;
		q_params.queue_id = i;
		q_params.vport_id = 0;
		q_params.sb = fp->sb_info->igu_sb_id;
		q_params.sb_idx = RX_PI;

		rc = edev->ops->q_rx_start(cdev, &q_params,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   phys_table,
					   fp->rxq->rx_comp_ring.page_cnt,
					   &fp->rxq->hw_rxq_prod_addr);
		if (rc) {
			DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
			return rc;
		}

		fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];

		qede_update_rx_prod(edev, fp->rxq);

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];
			int txq_index = tc * QEDE_RSS_CNT(edev) + i;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq_index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   txq->tx_pbl.pbl.p_phys_table,
						   txq->tx_pbl.page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq_index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
				&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}
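
	/* The default RSS spread below uses ethtool_rxfh_indir_default(),
	 * which is simply (i % n_queues); with the 128-entry indirection
	 * table and, say, 4 Rx queues (example figure), the table becomes
	 * 0,1,2,3,0,1,2,3,... so flows are distributed round-robin.
	 */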
2174 | ||
2175 | /* Prepare and send the vport enable */ | |
2176 | memset(&vport_update_params, 0, sizeof(vport_update_params)); | |
2177 | vport_update_params.vport_id = vport_id; | |
2178 | vport_update_params.update_vport_active_flg = 1; | |
2179 | vport_update_params.vport_active_flg = 1; | |
2180 | ||
2181 | /* Fill struct with RSS params */ | |
2182 | if (QEDE_RSS_CNT(edev) > 1) { | |
2183 | vport_update_params.update_rss_flg = 1; | |
2184 | for (i = 0; i < 128; i++) | |
2185 | rss_params->rss_ind_table[i] = | |
2186 | ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); | |
2187 | netdev_rss_key_fill(rss_params->rss_key, | |
2188 | sizeof(rss_params->rss_key)); | |
2189 | } else { | |
2190 | memset(rss_params, 0, sizeof(*rss_params)); | |
2191 | } | |
2192 | memcpy(&vport_update_params.rss_params, rss_params, | |
2193 | sizeof(*rss_params)); | |
2194 | ||
2195 | rc = edev->ops->vport_update(cdev, &vport_update_params); | |
2196 | if (rc) { | |
2197 | DP_ERR(edev, "Update V-PORT failed %d\n", rc); | |
2198 | return rc; | |
2199 | } | |
2200 | ||
2201 | return 0; | |
2202 | } | |

static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);

	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}
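
/* Load brings the device up in a fixed order - queue sizing, fp array
 * and memory allocation, NAPI, IRQs, then HW queue start - and the
 * error labels unwind in exactly the reverse order. qede_unload()
 * above is the mirror image of this sequence.
 */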
2267 | ||
2268 | enum qede_load_mode { | |
2269 | QEDE_LOAD_NORMAL, | |
2270 | }; | |
2271 | ||
2272 | static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) | |
2273 | { | |
a2ec6172 SK |
2274 | struct qed_link_params link_params; |
2275 | struct qed_link_output link_output; | |
2950219d YM |
2276 | int rc; |
2277 | ||
2278 | DP_INFO(edev, "Starting qede load\n"); | |
2279 | ||
2280 | rc = qede_set_num_queues(edev); | |
2281 | if (rc) | |
2282 | goto err0; | |
2283 | ||
2284 | rc = qede_alloc_fp_array(edev); | |
2285 | if (rc) | |
2286 | goto err0; | |
2287 | ||
2288 | qede_init_fp(edev); | |
2289 | ||
2290 | rc = qede_alloc_mem_load(edev); | |
2291 | if (rc) | |
2292 | goto err1; | |
2293 | DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", | |
2294 | QEDE_RSS_CNT(edev), edev->num_tc); | |
2295 | ||
2296 | rc = qede_set_real_num_queues(edev); | |
2297 | if (rc) | |
2298 | goto err2; | |
2299 | ||
2300 | qede_napi_add_enable(edev); | |
2301 | DP_INFO(edev, "Napi added and enabled\n"); | |
2302 | ||
2303 | rc = qede_setup_irqs(edev); | |
2304 | if (rc) | |
2305 | goto err3; | |
2306 | DP_INFO(edev, "Setup IRQs succeeded\n"); | |
2307 | ||
2308 | rc = qede_start_queues(edev); | |
2309 | if (rc) | |
2310 | goto err4; | |
2311 | DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); | |
2312 | ||
2313 | /* Add primary mac and set Rx filters */ | |
2314 | ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr); | |
2315 | ||
0d8e0aa0 SK |
2316 | mutex_lock(&edev->qede_lock); |
2317 | edev->state = QEDE_STATE_OPEN; | |
2318 | mutex_unlock(&edev->qede_lock); | |
a2ec6172 SK |
2319 | |
2320 | /* Ask for link-up using current configuration */ | |
2321 | memset(&link_params, 0, sizeof(link_params)); | |
2322 | link_params.link_up = true; | |
2323 | edev->ops->common->set_link(edev->cdev, &link_params); | |
2324 | ||
2325 | /* Query whether link is already-up */ | |
2326 | memset(&link_output, 0, sizeof(link_output)); | |
2327 | edev->ops->common->get_link(edev->cdev, &link_output); | |
2328 | qede_link_update(edev, &link_output); | |
2329 | ||
2950219d YM |
2330 | DP_INFO(edev, "Ending successfully qede load\n"); |
2331 | ||
2332 | return 0; | |
2333 | ||
2334 | err4: | |
2335 | qede_sync_free_irqs(edev); | |
2336 | memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); | |
2337 | err3: | |
2338 | qede_napi_disable_remove(edev); | |
2339 | err2: | |
2340 | qede_free_mem_load(edev); | |
2341 | err1: | |
2342 | edev->ops->common->set_fp_int(edev->cdev, 0); | |
2343 | qede_free_fp_array(edev); | |
2344 | edev->num_rss = 0; | |
2345 | err0: | |
2346 | return rc; | |
2347 | } | |
2348 | ||
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_NORMAL);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}
2366 | ||
2950219d YM |
2367 | /* called with rtnl_lock */ |
2368 | static int qede_open(struct net_device *ndev) | |
2369 | { | |
2370 | struct qede_dev *edev = netdev_priv(ndev); | |
2371 | ||
2372 | netif_carrier_off(ndev); | |
2373 | ||
2374 | edev->ops->common->set_power_state(edev->cdev, PCI_D0); | |
2375 | ||
2376 | return qede_load(edev, QEDE_LOAD_NORMAL); | |
2377 | } | |
2378 | ||
2379 | static int qede_close(struct net_device *ndev) | |
2380 | { | |
2381 | struct qede_dev *edev = netdev_priv(ndev); | |
2382 | ||
2383 | qede_unload(edev, QEDE_UNLOAD_NORMAL); | |
2384 | ||
2385 | return 0; | |
2386 | } | |

static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		DP_NOTICE(edev, "Link is up\n");
		netif_tx_start_all_queues(edev->ndev);
		netif_carrier_on(edev->ndev);
	} else {
		DP_NOTICE(edev, "Link is down\n");
		netif_tx_disable(edev->ndev);
		netif_carrier_off(edev->ndev);
	}
}
2407 | ||
0d8e0aa0 SK |
2408 | static int qede_set_mac_addr(struct net_device *ndev, void *p) |
2409 | { | |
2410 | struct qede_dev *edev = netdev_priv(ndev); | |
2411 | struct sockaddr *addr = p; | |
2412 | int rc; | |
2413 | ||
2414 | ASSERT_RTNL(); /* @@@TBD To be removed */ | |
2415 | ||
2416 | DP_INFO(edev, "Set_mac_addr called\n"); | |
2417 | ||
2418 | if (!is_valid_ether_addr(addr->sa_data)) { | |
2419 | DP_NOTICE(edev, "The MAC address is not valid\n"); | |
2420 | return -EFAULT; | |
2421 | } | |
2422 | ||
2423 | ether_addr_copy(ndev->dev_addr, addr->sa_data); | |
2424 | ||
2425 | if (!netif_running(ndev)) { | |
2426 | DP_NOTICE(edev, "The device is currently down\n"); | |
2427 | return 0; | |
2428 | } | |
2429 | ||
2430 | /* Remove the previous primary mac */ | |
2431 | rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, | |
2432 | edev->primary_mac); | |
2433 | if (rc) | |
2434 | return rc; | |
2435 | ||
2436 | /* Add MAC filter according to the new unicast HW MAC address */ | |
2437 | ether_addr_copy(edev->primary_mac, ndev->dev_addr); | |
2438 | return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, | |
2439 | edev->primary_mac); | |
2440 | } | |
2441 | ||
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	/* The buffer holds 64 entries, so copy the list only when it fits */
	mc_count = netdev_mc_count(ndev);
	if (mc_count <= 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
2497 | ||
2498 | static void qede_set_rx_mode(struct net_device *ndev) | |
2499 | { | |
2500 | struct qede_dev *edev = netdev_priv(ndev); | |
2501 | ||
2502 | DP_INFO(edev, "qede_set_rx_mode called\n"); | |
2503 | ||
2504 | if (edev->state != QEDE_STATE_OPEN) { | |
2505 | DP_INFO(edev, | |
2506 | "qede_set_rx_mode called while interface is down\n"); | |
2507 | } else { | |
2508 | set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); | |
2509 | schedule_delayed_work(&edev->sp_task, 0); | |
2510 | } | |
2511 | } | |
2512 | ||
/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > 15)) { /* @@@TBD resource allocation - 1 */
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		int i;

		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}