/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include "ena_pci_id_tbl.h"

static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
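/* A negative debug value (the default) leaves the message mask at
 * DEFAULT_MSG_ENABLE; see netif_msg_init() in the probe path.
 */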

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);

static void ena_tx_timeout(struct net_device *dev)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timed out\n");

	/* Change the state of the device to trigger reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      adapter->msix_entries[irq_idx].vector);
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->per_napi_bytes = 0;
	ring->cpu = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = 0; i < adapter->num_queues; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX/RX common ring state */
		ena_init_io_rings_common(adapter, txr, i);
		ena_init_io_rings_common(adapter, rxr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);

		/* RX specific ring state */
		rxr->ring_size = adapter->rx_ring_size;
		rxr->rx_copybreak = adapter->rx_copybreak;
		rxr->sgl_size = adapter->max_rx_sgl_size;
		rxr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

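	/* Try to place the bookkeeping arrays on the NUMA node that
	 * services this queue's IRQ; fall back to any node rather than
	 * failing the queue bring-up.
	 */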
	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_tx_ids = vzalloc_node(size, node);
	if (!tx_ring->free_tx_ids) {
		tx_ring->free_tx_ids = vzalloc(size);
		if (!tx_ring->free_tx_ids) {
			vfree(tx_ring->tx_buffer_info);
			return -ENOMEM;
		}
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_tx_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_tx_ids);
	tx_ring->free_tx_ids = NULL;
}

/* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
 * @adapter: private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
				    struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if the previously allocated page has not been consumed yet, keep it */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
			   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma;
	ena_buf->len = PAGE_SIZE;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
		       DMA_FROM_DEVICE);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

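	/* Refill runs from softirq (NAPI) context, so the page allocation
	 * below must not sleep; hence GFP_ATOMIC.
	 */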
	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info =
			&rx_ring->rx_buffer_info[next_to_use];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						next_to_use);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	if (likely(i)) {
		/* Add a memory barrier to make sure the descriptors were
		 * written before issuing the doorbell
		 */
		wmb();
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
	}

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 *
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling queue %d failed. allocated %d buffers out of %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
		struct ena_com_buf *ena_buf;
		int nr_frags;
		int j;

		if (!tx_info->skb)
			continue;

		netdev_notice(tx_ring->netdev,
			      "free uncompleted tx skb qid %d idx 0x%x\n",
			      tx_ring->qid, i);

		ena_buf = tx_info->bufs;
		dma_unmap_single(tx_ring->dev,
				 ena_buf->paddr,
				 ena_buf->len,
				 DMA_TO_DEVICE);

		/* unmap remaining mapped pages */
		nr_frags = tx_info->num_of_bufs - 1;
		for (j = 0; j < nr_frags; j++) {
			ena_buf++;
			dma_unmap_page(tx_ring->dev,
				       ena_buf->paddr,
				       ena_buf->len,
				       DMA_TO_DEVICE);
		}

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	if (tx_info)
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_info doesn't have valid skb\n");
	else
		netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "Invalid req_id: %hu\n", req_id);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&tx_ring->syncp);

	/* Trigger device reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
	return -EFAULT;
}

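/* Reclaim completed TX descriptors: the device reports a req_id per
 * completion, which indexes tx_buffer_info and is recycled through
 * free_tx_ids for reuse by the transmit path.
 */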
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;
		struct ena_com_buf *ena_buf;
		int i, nr_frags;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		if (likely(tx_info->num_of_bufs != 0)) {
			ena_buf = tx_info->bufs;

			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(ena_buf, paddr),
					 dma_unmap_len(ena_buf, len),
					 DMA_TO_DEVICE);

			/* unmap remaining mapped pages */
			nr_frags = tx_info->num_of_bufs - 1;
			for (i = 0; i < nr_frags; i++) {
				ena_buf++;
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(ena_buf, paddr),
					       dma_unmap_len(ena_buf, len),
					       DMA_TO_DEVICE);
			}
		}

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_tx_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the ring's circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
		ENA_TX_WAKEUP_THRESH;
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
			ENA_TX_WAKEUP_THRESH;
		if (netif_tx_queue_stopped(txq) && above_thresh) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	tx_ring->per_napi_bytes += tx_bytes;
	tx_ring->per_napi_packets += tx_pkts;

	return tx_pkts;
}

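/* Build an skb for a received packet: packets up to rx_copybreak bytes
 * are copied into a small linear skb so the DMA page can stay mapped and
 * be reused in place, while larger packets attach their pages as frags
 * via napi_get_frags().
 */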
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info =
		&rx_ring->rx_buffer_info[*next_to_clean];
	u32 len;
	u32 buf = 0;
	void *va;

	len = ena_bufs[0].len;
	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.skb_alloc_fail++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "Failed to allocate skb\n");
			return NULL;
		}

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = napi_get_frags(rx_ring->napi);
	if (unlikely(!skb)) {
		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "Failed allocating skb\n");
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return NULL;
	}

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       PAGE_SIZE, DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;
		rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
		len = ena_bufs[++buf].len;
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the RX ring the packet was received on
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static inline void ena_rx_checksum(struct ena_ring *rx_ring,
				   struct ena_com_rx_ctx *ena_rx_ctx,
				   struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))

			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	u32 res_budget, work_done;

	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_adapter *adapter;
	struct sk_buff *skb;
	int refill_required;
	int refill_threshold;
	int rc = 0;
	int total_len = 0;
	int rx_copybreak_pkt = 0;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;

	do {
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		/* allocate skb and fill it */
		skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
				 &next_to_clean);

		/* exit if we failed to retrieve a buffer */
		if (unlikely(!skb)) {
			next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean,
							    ena_rx_ctx.descs,
							    rx_ring->ring_size);
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_bytes += total_len;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
	refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
				       struct ena_ring *tx_ring)
{
	/* We apply adaptive moderation on Rx path only.
	 * Tx uses static interrupt moderation.
	 */
	ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
					  rx_ring->per_napi_packets,
					  rx_ring->per_napi_bytes,
					  &rx_ring->smoothed_interval,
					  &rx_ring->moder_tbl_idx);

	/* Reset per napi packets/bytes */
	tx_ring->per_napi_packets = 0;
	tx_ring->per_napi_bytes = 0;
	rx_ring->per_napi_packets = 0;
	rx_ring->per_napi_bytes = 0;
}

static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
					     struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
	}

	tx_ring->cpu = cpu;
	rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

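/* NAPI poll handler. TX and RX of a queue pair share one MSI-X vector,
 * so a single poll cleans both rings; the interrupt is unmasked only
 * when both rings finish under their budgets.
 */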
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	struct ena_eth_io_intr_reg intr_reg;

	u32 tx_work_done;
	u32 rx_work_done;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_complete_done(napi, rx_work_done);

		napi_comp_call = 1;
		/* Tx and Rx share the same interrupt vector */
		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
			ena_adjust_intr_moderation(rx_ring, tx_ring);

		/* Update intr register: rx intr delay, tx intr delay and
		 * interrupt unmask
		 */
		ena_com_update_intr_reg(&intr_reg,
					rx_ring->smoothed_interval,
					tx_ring->smoothed_interval,
					true);

		/* It is a shared MSI-X. Tx and Rx CQ have pointer to it.
		 * So we use one of them to reach the intr reg
		 */
		ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}

static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	napi_schedule(&ena_napi->napi);

	return IRQ_HANDLED;
}

static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{
	int i, msix_vecs, rc;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max MSI-X vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);

	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	adapter->msix_entries = vzalloc(msix_vecs * sizeof(struct msix_entry));

	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_vecs; i++)
		adapter->msix_entries[i].entry = i;

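	/* pci_enable_msix() is all-or-nothing here: a positive return
	 * (fewer vectors available than requested) is treated as failure
	 * rather than falling back to a reduced queue count.
	 */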
	rc = pci_enable_msix(adapter->pdev, adapter->msix_entries, msix_vecs);
	if (rc != 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X, vectors %d rc %d\n",
			  msix_vecs, rc);
		return -ENOSPC;
	}

	netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n",
		  msix_vecs);

	if (msix_vecs >= 1) {
		if (ena_init_rx_cpu_rmap(adapter))
			netif_warn(adapter, probe, adapter->netdev,
				   "Failed to map IRQs to CPUs\n");
	}

	adapter->msix_vecs = msix_vecs;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector;
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;

	netdev = adapter->netdev;

	for (i = 0; i < adapter->num_queues; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			adapter->msix_entries[irq_idx].vector;
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
1287 | "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n", | |
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_disable_msix(adapter->pdev);

	if (adapter->msix_entries)
		vfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		netif_napi_del(&adapter->ena_napi[i].napi);
}

static void ena_init_napi(struct ena_adapter *adapter)
{
	struct ena_napi *napi;
	int i;

	for (i = 0; i < adapter->num_queues; i++) {
		napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &adapter->ena_napi[i].napi,
			       ena_io_poll,
			       ENA_NAPI_BUDGET);
		napi->rx_ring = &adapter->rx_ring[i];
		napi->tx_ring = &adapter->tx_ring[i];
		napi->qid = i;
	}
}

static void ena_napi_disable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_all(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_queues; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

static void ena_restore_ethtool_params(struct ena_adapter *adapter)
{
	adapter->tx_usecs = 0;
	adapter->rx_usecs = 0;
	adapter->tx_frames = 1;
	adapter->rx_frames = 1;
}

/* Configure RSS: set the indirection table, hash function and hash inputs */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EPERM)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EPERM))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EPERM)))
		return rc;

	return 0;
}

static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc, i;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_init_napi(adapter);

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_restore_ethtool_params(adapter);

	ena_napi_enable_all(adapter);

	/* schedule napi in case we had pending packets
	 * from the last time we disabled napi
	 */
	for (i = 0; i < adapter->num_queues; i++)
		napi_schedule(&adapter->ena_napi[i].napi);

	return 0;
}

static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->tx_ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}

static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_tx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));

	return rc;
}

static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_dev *ena_dev;
	struct ena_com_create_io_ctx ctx = { 0 };
	struct ena_ring *rx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	rx_ring = &adapter->rx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_RXQ_IDX(qid);

	ctx.qid = ena_qid;
	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = adapter->rx_ring_size;
	ctx.numa_node = cpu_to_node(rx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O RX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &rx_ring->ena_com_io_sq,
				     &rx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
	}

	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);

	return rc;
}

static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc, i;

	for (i = 0; i < adapter->num_queues; i++) {
		rc = ena_create_io_rx_queue(adapter, i);
		if (rc)
			goto create_err;
	}

	return 0;

create_err:
	while (i--)
		ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));

	return rc;
}

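/* Bring-up order matters: I/O IRQs are set up and requested first so each
 * vector's CPU (and thus NUMA node) is known when ring memory is allocated,
 * then the queues are created in the device, and finally RSS, NAPI and the
 * TX path are enabled.
 */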
static int ena_up(struct ena_adapter *adapter)
{
	int rc;

	netdev_dbg(adapter->netdev, "%s\n", __func__);

	ena_setup_io_intr(adapter);

	rc = ena_request_io_irq(adapter);
	if (rc)
		goto err_req_irq;

	/* allocate transmit descriptors */
	rc = ena_setup_all_tx_resources(adapter);
	if (rc)
		goto err_setup_tx;

	/* allocate receive descriptors */
	rc = ena_setup_all_rx_resources(adapter);
	if (rc)
		goto err_setup_rx;

	/* Create TX queues */
	rc = ena_create_all_io_tx_queues(adapter);
	if (rc)
		goto err_create_tx_queues;

	/* Create RX queues */
	rc = ena_create_all_io_rx_queues(adapter);
	if (rc)
		goto err_create_rx_queues;

	rc = ena_up_complete(adapter);
	if (rc)
		goto err_up;

	if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
		netif_carrier_on(adapter->netdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.interface_up++;
	u64_stats_update_end(&adapter->syncp);

	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	return rc;

err_up:
	ena_destroy_all_rx_queues(adapter);
err_create_rx_queues:
	ena_destroy_all_tx_queues(adapter);
err_create_tx_queues:
	ena_free_all_io_rx_resources(adapter);
err_setup_rx:
	ena_free_all_io_tx_resources(adapter);
err_setup_tx:
	ena_free_io_irq(adapter);
err_req_irq:

	return rc;
}

1691 | static void ena_down(struct ena_adapter *adapter) | |
1692 | { | |
1693 | netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); | |
1694 | ||
1695 | clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
1696 | ||
1697 | u64_stats_update_begin(&adapter->syncp); | |
1698 | adapter->dev_stats.interface_down++; | |
1699 | u64_stats_update_end(&adapter->syncp); | |
1700 | ||
1701 | /* After this point the napi handler won't enable the tx queue */ | |
1702 | ena_napi_disable_all(adapter); | |
1703 | netif_carrier_off(adapter->netdev); | |
1704 | netif_tx_disable(adapter->netdev); | |
1705 | ||
1706 | /* After destroying the queues there won't be any new interrupts */ | |
1707 | ena_destroy_all_io_queues(adapter); | |
1708 | ||
1709 | ena_disable_io_intr_sync(adapter); | |
1710 | ena_free_io_irq(adapter); | |
1711 | ena_del_napi(adapter); | |
1712 | ||
1713 | ena_free_all_tx_bufs(adapter); | |
1714 | ena_free_all_rx_bufs(adapter); | |
1715 | ena_free_all_io_tx_resources(adapter); | |
1716 | ena_free_all_io_rx_resources(adapter); | |
1717 | } | |
1718 | ||
1719 | /* ena_open - Called when a network interface is made active | |
1720 | * @netdev: network interface device structure | |
1721 | * | |
1722 | * Returns 0 on success, negative value on failure | |
1723 | * | |
1724 | * The open entry point is called when a network interface is made | |
1725 | * active by the system (IFF_UP). At this point all resources needed | |
1726 | * for transmit and receive operations are allocated, the interrupt | |
1727 | * handler is registered with the OS, the watchdog timer is started, | |
1728 | * and the stack is notified that the interface is ready. | |
1729 | */ | |
1730 | static int ena_open(struct net_device *netdev) | |
1731 | { | |
1732 | struct ena_adapter *adapter = netdev_priv(netdev); | |
1733 | int rc; | |
1734 | ||
1735 | /* Notify the stack of the actual queue counts. */ | |
1736 | rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues); | |
1737 | if (rc) { | |
1738 | netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); | |
1739 | return rc; | |
1740 | } | |
1741 | ||
1742 | rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues); | |
1743 | if (rc) { | |
1744 | netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); | |
1745 | return rc; | |
1746 | } | |
1747 | ||
1748 | rc = ena_up(adapter); | |
1749 | if (rc) | |
1750 | return rc; | |
1751 | ||
1752 | return rc; | |
1753 | } | |
1754 | ||
1755 | /* ena_close - Disables a network interface | |
1756 | * @netdev: network interface device structure | |
1757 | * | |
1758 | * Returns 0, this is not allowed to fail | |
1759 | * | |
1760 | * The close entry point is called when an interface is de-activated | |
1761 | * by the OS. The hardware is still under the drivers control, but | |
1762 | * needs to be disabled. A global MAC reset is issued to stop the | |
1763 | * hardware, and all transmit and receive resources are freed. | |
1764 | */ | |
1765 | static int ena_close(struct net_device *netdev) | |
1766 | { | |
1767 | struct ena_adapter *adapter = netdev_priv(netdev); | |
1768 | ||
1769 | netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); | |
1770 | ||
1771 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
1772 | ena_down(adapter); | |
1773 | ||
1774 | return 0; | |
1775 | } | |
1776 | ||
1777 | static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) | |
1778 | { | |
1779 | u32 mss = skb_shinfo(skb)->gso_size; | |
1780 | struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; | |
1781 | u8 l4_protocol = 0; | |
1782 | ||
1783 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { | |
1784 | ena_tx_ctx->l4_csum_enable = 1; | |
1785 | if (mss) { | |
1786 | ena_tx_ctx->tso_enable = 1; | |
1787 | ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; | |
1788 | ena_tx_ctx->l4_csum_partial = 0; | |
1789 | } else { | |
1790 | ena_tx_ctx->tso_enable = 0; | |
1791 | ena_meta->l4_hdr_len = 0; | |
1792 | ena_tx_ctx->l4_csum_partial = 1; | |
1793 | } | |
1794 | ||
1795 | switch (ip_hdr(skb)->version) { | |
1796 | case IPVERSION: | |
1797 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; | |
1798 | if (ip_hdr(skb)->frag_off & htons(IP_DF)) | |
1799 | ena_tx_ctx->df = 1; | |
1800 | if (mss) | |
1801 | ena_tx_ctx->l3_csum_enable = 1; | |
1802 | l4_protocol = ip_hdr(skb)->protocol; | |
1803 | break; | |
1804 | case 6: | |
1805 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; | |
1806 | l4_protocol = ipv6_hdr(skb)->nexthdr; | |
1807 | break; | |
1808 | default: | |
1809 | break; | |
1810 | } | |
1811 | ||
1812 | if (l4_protocol == IPPROTO_TCP) | |
1813 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; | |
1814 | else | |
1815 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; | |
1816 | ||
1817 | ena_meta->mss = mss; | |
1818 | ena_meta->l3_hdr_len = skb_network_header_len(skb); | |
1819 | ena_meta->l3_hdr_offset = skb_network_offset(skb); | |
1820 | ena_tx_ctx->meta_valid = 1; | |
1821 | ||
1822 | } else { | |
1823 | ena_tx_ctx->meta_valid = 0; | |
1824 | } | |
1825 | } | |
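/* Illustrative sketch (example values assumed, not driver code): for a
 * TSO IPv4/TCP skb with gso_size = 1448 and the DF bit set, the function
 * above effectively produces:
 *
 *	ena_tx_ctx->tso_enable     = 1;
 *	ena_tx_ctx->l4_csum_enable = 1;
 *	ena_tx_ctx->l3_csum_enable = 1;
 *	ena_tx_ctx->df             = 1;
 *	ena_tx_ctx->l3_proto       = ENA_ETH_IO_L3_PROTO_IPV4;
 *	ena_tx_ctx->l4_proto       = ENA_ETH_IO_L4_PROTO_TCP;
 *	ena_meta->mss              = 1448;
 *	ena_meta->l4_hdr_len       = tcp_hdr(skb)->doff;
 *
 * Note that tcp_hdr(skb)->doff counts 32-bit words, so a bare 20-byte
 * TCP header yields 5, not 20.
 */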
1826 | ||
1827 | static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, | |
1828 | struct sk_buff *skb) | |
1829 | { | |
1830 | int num_frags, header_len, rc; | |
1831 | ||
1832 | num_frags = skb_shinfo(skb)->nr_frags; | |
1833 | header_len = skb_headlen(skb); | |
1834 | ||
1835 | if (num_frags < tx_ring->sgl_size) | |
1836 | return 0; | |
1837 | ||
1838 | if ((num_frags == tx_ring->sgl_size) && | |
1839 | (header_len < tx_ring->tx_max_header_size)) | |
1840 | return 0; | |
1841 | ||
1842 | u64_stats_update_begin(&tx_ring->syncp); | |
1843 | tx_ring->tx_stats.linearize++; | |
1844 | u64_stats_update_end(&tx_ring->syncp); | |
1845 | ||
1846 | rc = skb_linearize(skb); | |
1847 | if (unlikely(rc)) { | |
1848 | u64_stats_update_begin(&tx_ring->syncp); | |
1849 | tx_ring->tx_stats.linearize_failed++; | |
1850 | u64_stats_update_end(&tx_ring->syncp); | |
1851 | } | |
1852 | ||
1853 | return rc; | |
1854 | } | |
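/* Worked example (limits assumed for illustration): with
 * tx_ring->sgl_size = 17 and tx_max_header_size = 96, an skb with up to
 * 16 frags is sent as-is; one with exactly 17 frags is still fine when
 * skb_headlen() < 96, because the header is sent separately and frees up
 * one data descriptor; anything larger is flattened by skb_linearize()
 * into a single contiguous buffer (and counted in tx_stats.linearize).
 */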
1855 | ||
1856 | /* Called with netif_tx_lock. */ | |
1857 | static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
1858 | { | |
1859 | struct ena_adapter *adapter = netdev_priv(dev); | |
1860 | struct ena_tx_buffer *tx_info; | |
1861 | struct ena_com_tx_ctx ena_tx_ctx; | |
1862 | struct ena_ring *tx_ring; | |
1863 | struct netdev_queue *txq; | |
1864 | struct ena_com_buf *ena_buf; | |
1865 | void *push_hdr; | |
1866 | u32 len, last_frag; | |
1867 | u16 next_to_use; | |
1868 | u16 req_id; | |
1869 | u16 push_len; | |
1870 | u16 header_len; | |
1871 | dma_addr_t dma; | |
1872 | int qid, rc, nb_hw_desc; | |
1873 | int i = -1; | |
1874 | ||
1875 | netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); | |
1876 | /* Determine which tx ring we will be placed on */ | |
1877 | qid = skb_get_queue_mapping(skb); | |
1878 | tx_ring = &adapter->tx_ring[qid]; | |
1879 | txq = netdev_get_tx_queue(dev, qid); | |
1880 | ||
1881 | rc = ena_check_and_linearize_skb(tx_ring, skb); | |
1882 | if (unlikely(rc)) | |
1883 | goto error_drop_packet; | |
1884 | ||
1885 | skb_tx_timestamp(skb); | |
1886 | len = skb_headlen(skb); | |
1887 | ||
1888 | next_to_use = tx_ring->next_to_use; | |
1889 | req_id = tx_ring->free_tx_ids[next_to_use]; | |
1890 | tx_info = &tx_ring->tx_buffer_info[req_id]; | |
1891 | tx_info->num_of_bufs = 0; | |
1892 | ||
1893 | WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); | |
1894 | ena_buf = tx_info->bufs; | |
1895 | tx_info->skb = skb; | |
1896 | ||
1897 | if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
1898 | /* prepare the push buffer */ | |
1899 | push_len = min_t(u32, len, tx_ring->tx_max_header_size); | |
1900 | header_len = push_len; | |
1901 | push_hdr = skb->data; | |
1902 | } else { | |
1903 | push_len = 0; | |
1904 | header_len = min_t(u32, len, tx_ring->tx_max_header_size); | |
1905 | push_hdr = NULL; | |
1906 | } | |
1907 | ||
1908 | netif_dbg(adapter, tx_queued, dev, | |
1909 | "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, | |
1910 | push_hdr, push_len); | |
1911 | ||
1912 | if (len > push_len) { | |
1913 | dma = dma_map_single(tx_ring->dev, skb->data + push_len, | |
1914 | len - push_len, DMA_TO_DEVICE); | |
1915 | if (dma_mapping_error(tx_ring->dev, dma)) | |
1916 | goto error_report_dma_error; | |
1917 | ||
1918 | ena_buf->paddr = dma; | |
1919 | ena_buf->len = len - push_len; | |
1920 | ||
1921 | ena_buf++; | |
1922 | tx_info->num_of_bufs++; | |
1923 | } | |
1924 | ||
1925 | last_frag = skb_shinfo(skb)->nr_frags; | |
1926 | ||
1927 | for (i = 0; i < last_frag; i++) { | |
1928 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
1929 | ||
1930 | len = skb_frag_size(frag); | |
1931 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, | |
1932 | DMA_TO_DEVICE); | |
1933 | if (dma_mapping_error(tx_ring->dev, dma)) | |
1934 | goto error_report_dma_error; | |
1935 | ||
1936 | ena_buf->paddr = dma; | |
1937 | ena_buf->len = len; | |
1938 | ena_buf++; | |
1939 | } | |
1940 | ||
1941 | tx_info->num_of_bufs += last_frag; | |
1942 | ||
1943 | memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); | |
1944 | ena_tx_ctx.ena_bufs = tx_info->bufs; | |
1945 | ena_tx_ctx.push_header = push_hdr; | |
1946 | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | |
1947 | ena_tx_ctx.req_id = req_id; | |
1948 | ena_tx_ctx.header_len = header_len; | |
1949 | ||
1950 | /* set flags and metadata */ | |
1951 | ena_tx_csum(&ena_tx_ctx, skb); | |
1952 | ||
1953 | /* prepare the packet's descriptors for the DMA engine */ | |
1954 | rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, | |
1955 | &nb_hw_desc); | |
1956 | ||
1957 | if (unlikely(rc)) { | |
1958 | netif_err(adapter, tx_queued, dev, | |
1959 | "failed to prepare tx bufs\n"); | |
1960 | u64_stats_update_begin(&tx_ring->syncp); | |
1961 | tx_ring->tx_stats.queue_stop++; | |
1962 | tx_ring->tx_stats.prepare_ctx_err++; | |
1963 | u64_stats_update_end(&tx_ring->syncp); | |
1964 | netif_tx_stop_queue(txq); | |
1965 | goto error_unmap_dma; | |
1966 | } | |
1967 | ||
1968 | netdev_tx_sent_queue(txq, skb->len); | |
1969 | ||
1970 | u64_stats_update_begin(&tx_ring->syncp); | |
1971 | tx_ring->tx_stats.cnt++; | |
1972 | tx_ring->tx_stats.bytes += skb->len; | |
1973 | u64_stats_update_end(&tx_ring->syncp); | |
1974 | ||
1975 | tx_info->tx_descs = nb_hw_desc; | |
1976 | tx_info->last_jiffies = jiffies; | |
1977 | ||
1978 | tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, | |
1979 | tx_ring->ring_size); | |
1980 | ||
1981 | /* This wmb() serves two purposes: | |
1982 | * 1 - perform an SMP barrier before reading next_to_completion | |
1983 | * 2 - make sure the descriptors are written before triggering the doorbell | |
1984 | */ | |
1985 | wmb(); | |
1986 | ||
1987 | /* Stop the queue when no more space is available. A packet can require up | |
1988 | * to sgl_size + 2 descriptors: one extra for the meta descriptor and one | |
1989 | * for the header (if the header is larger than tx_max_header_size). | |
1990 | */ | |
1991 | if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) < | |
1992 | (tx_ring->sgl_size + 2))) { | |
1993 | netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", | |
1994 | __func__, qid); | |
1995 | ||
1996 | netif_tx_stop_queue(txq); | |
1997 | u64_stats_update_begin(&tx_ring->syncp); | |
1998 | tx_ring->tx_stats.queue_stop++; | |
1999 | u64_stats_update_end(&tx_ring->syncp); | |
2000 | ||
2001 | /* There is a rare condition where this function decides to | |
2002 | * stop the queue but meanwhile clean_tx_irq updates | |
2003 | * next_to_completion and terminates. | |
2004 | * The queue would then remain stopped forever. | |
2005 | * To solve this issue, this function performs an smp_rmb(), rechecks | |
2006 | * the wakeup condition and wakes up the queue if needed. | |
2007 | */ | |
2008 | smp_rmb(); | |
2009 | ||
2010 | if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) | |
2011 | > ENA_TX_WAKEUP_THRESH) { | |
2012 | netif_tx_wake_queue(txq); | |
2013 | u64_stats_update_begin(&tx_ring->syncp); | |
2014 | tx_ring->tx_stats.queue_wakeup++; | |
2015 | u64_stats_update_end(&tx_ring->syncp); | |
2016 | } | |
2017 | } | |
2018 | ||
2019 | if (netif_xmit_stopped(txq) || !skb->xmit_more) { | |
2020 | /* trigger the dma engine */ | |
2021 | ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); | |
2022 | u64_stats_update_begin(&tx_ring->syncp); | |
2023 | tx_ring->tx_stats.doorbells++; | |
2024 | u64_stats_update_end(&tx_ring->syncp); | |
2025 | } | |
2026 | ||
2027 | return NETDEV_TX_OK; | |
2028 | ||
2029 | error_report_dma_error: | |
2030 | u64_stats_update_begin(&tx_ring->syncp); | |
2031 | tx_ring->tx_stats.dma_mapping_err++; | |
2032 | u64_stats_update_end(&tx_ring->syncp); | |
2033 | netdev_warn(adapter->netdev, "failed to map skb\n"); | |
2034 | ||
2035 | tx_info->skb = NULL; | |
2036 | ||
2037 | error_unmap_dma: | |
2038 | if (i >= 0) { | |
2039 | /* save value of frag that failed */ | |
2040 | last_frag = i; | |
2041 | ||
2042 | /* start back at the beginning and unmap the skb */ | |
2043 | tx_info->skb = NULL; | |
2044 | ena_buf = tx_info->bufs; | |
2045 | dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), | |
2046 | dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); | |
2047 | ||
2048 | /* unmap remaining mapped pages */ | |
2049 | for (i = 0; i < last_frag; i++) { | |
2050 | ena_buf++; | |
2051 | dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), | |
2052 | dma_unmap_len(ena_buf, len), DMA_TO_DEVICE); | |
2053 | } | |
2054 | } | |
2055 | ||
2056 | error_drop_packet: | |
2057 | ||
2058 | dev_kfree_skb(skb); | |
2059 | return NETDEV_TX_OK; | |
2060 | } | |
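/* Sketch of the lost-wakeup avoidance used above (illustrative; the
 * completion side lives in the TX clean path, which is not shown here):
 *
 *	xmit path:                      completion path:
 *	  stop queue if space low         free descriptors
 *	  smp_rmb()                       smp_mb()
 *	  if space > WAKEUP_THRESH        if queue stopped and
 *	      wake queue                      space > WAKEUP_THRESH
 *	                                      wake queue
 *
 * Each side re-checks the other's condition after its barrier, so at
 * least one of them observes the final state and the queue cannot stay
 * stopped forever.
 */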
2061 | ||
2062 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2063 | static void ena_netpoll(struct net_device *netdev) | |
2064 | { | |
2065 | struct ena_adapter *adapter = netdev_priv(netdev); | |
2066 | int i; | |
2067 | ||
2068 | for (i = 0; i < adapter->num_queues; i++) | |
2069 | napi_schedule(&adapter->ena_napi[i].napi); | |
2070 | } | |
2071 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | |
2072 | ||
2073 | static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, | |
2074 | void *accel_priv, select_queue_fallback_t fallback) | |
2075 | { | |
2076 | u16 qid; | |
2077 | /* We suspect that this is good for in-kernel network services that | |
2078 | * want to loop an incoming skb's rx queue back to tx; with normal | |
2079 | * user-generated traffic we will most probably not get here. | |
2080 | */ | |
2081 | if (skb_rx_queue_recorded(skb)) | |
2082 | qid = skb_get_rx_queue(skb); | |
2083 | else | |
2084 | qid = fallback(dev, skb); | |
2085 | ||
2086 | return qid; | |
2087 | } | |
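/* Illustrative example: a service that loops packets back (e.g. a
 * forwarder) retransmits an skb that arrived on rx queue 3; the recorded
 * rx queue makes it go out on tx queue 3, keeping the flow on one queue
 * pair. Locally generated traffic has no recorded rx queue and takes the
 * fallback hash instead.
 */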
2088 | ||
2089 | static void ena_config_host_info(struct ena_com_dev *ena_dev) | |
2090 | { | |
2091 | struct ena_admin_host_info *host_info; | |
2092 | int rc; | |
2093 | ||
2094 | /* Allocate only the host info */ | |
2095 | rc = ena_com_allocate_host_info(ena_dev); | |
2096 | if (rc) { | |
2097 | pr_err("Cannot allocate host info\n"); | |
2098 | return; | |
2099 | } | |
2100 | ||
2101 | host_info = ena_dev->host_attr.host_info; | |
2102 | ||
2103 | host_info->os_type = ENA_ADMIN_OS_LINUX; | |
2104 | host_info->kernel_ver = LINUX_VERSION_CODE; | |
2105 | strncpy(host_info->kernel_ver_str, utsname()->version, | |
2106 | sizeof(host_info->kernel_ver_str) - 1); | |
2107 | host_info->os_dist = 0; | |
2108 | strncpy(host_info->os_dist_str, utsname()->release, | |
2109 | sizeof(host_info->os_dist_str) - 1); | |
2110 | host_info->driver_version = | |
2111 | (DRV_MODULE_VER_MAJOR) | | |
2112 | (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | |
2113 | (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); | |
2114 | ||
2115 | rc = ena_com_set_host_attributes(ena_dev); | |
2116 | if (rc) { | |
2117 | if (rc == -EPERM) | |
2118 | pr_warn("Cannot set host attributes\n"); | |
2119 | else | |
2120 | pr_err("Cannot set host attributes\n"); | |
2121 | ||
2122 | goto err; | |
2123 | } | |
2124 | ||
2125 | return; | |
2126 | ||
2127 | err: | |
2128 | ena_com_delete_host_info(ena_dev); | |
2129 | } | |
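/* Worked example (shift values assumed for illustration): if the minor
 * and sub-minor shifts are 8 and 16, a 1.1.2 driver packs to
 *
 *	1 | (1 << 8) | (2 << 16) = 0x00020101
 *
 * The real shifts come from the ENA_ADMIN_HOST_INFO_*_SHIFT defines.
 */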
2130 | ||
2131 | static void ena_config_debug_area(struct ena_adapter *adapter) | |
2132 | { | |
2133 | u32 debug_area_size; | |
2134 | int rc, ss_count; | |
2135 | ||
2136 | ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); | |
2137 | if (ss_count <= 0) { | |
2138 | netif_err(adapter, drv, adapter->netdev, | |
2139 | "SS count is negative\n"); | |
2140 | return; | |
2141 | } | |
2142 | ||
2143 | /* allocate ETH_GSTRING_LEN (32) bytes for each string and 64 bits for each value */ | |
2144 | debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; | |
2145 | ||
2146 | rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); | |
2147 | if (rc) { | |
2148 | pr_err("Cannot allocate debug area\n"); | |
2149 | return; | |
2150 | } | |
2151 | ||
2152 | rc = ena_com_set_host_attributes(adapter->ena_dev); | |
2153 | if (rc) { | |
2154 | if (rc == -EPERM) | |
2155 | netif_warn(adapter, drv, adapter->netdev, | |
2156 | "Cannot set host attributes\n"); | |
2157 | else | |
2158 | netif_err(adapter, drv, adapter->netdev, | |
2159 | "Cannot set host attributes\n"); | |
2160 | goto err; | |
2161 | } | |
2162 | ||
2163 | return; | |
2164 | err: | |
2165 | ena_com_delete_debug_area(adapter->ena_dev); | |
2166 | } | |
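/* Worked example (ss_count assumed for illustration): with 40 stats,
 * debug_area_size = 40 * ETH_GSTRING_LEN + 40 * sizeof(u64)
 *                 = 40 * 32 + 40 * 8 = 1600 bytes.
 */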
2167 | ||
bc1f4470 | 2168 | static void ena_get_stats64(struct net_device *netdev, |
2169 | struct rtnl_link_stats64 *stats) | |
1738cd3e NB |
2170 | { |
2171 | struct ena_adapter *adapter = netdev_priv(netdev); | |
d81db240 NB |
2172 | struct ena_ring *rx_ring, *tx_ring; |
2173 | unsigned int start; | |
2174 | u64 rx_drops; | |
2175 | int i; | |
1738cd3e NB |
2176 | |
2177 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
bc1f4470 | 2178 | return; |
1738cd3e | 2179 | |
d81db240 NB |
2180 | for (i = 0; i < adapter->num_queues; i++) { |
2181 | u64 bytes, packets; | |
2182 | ||
2183 | tx_ring = &adapter->tx_ring[i]; | |
1738cd3e | 2184 | |
d81db240 NB |
2185 | do { |
2186 | start = u64_stats_fetch_begin_irq(&tx_ring->syncp); | |
2187 | packets = tx_ring->tx_stats.cnt; | |
2188 | bytes = tx_ring->tx_stats.bytes; | |
2189 | } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); | |
1738cd3e | 2190 | |
d81db240 NB |
2191 | stats->tx_packets += packets; |
2192 | stats->tx_bytes += bytes; | |
2193 | ||
2194 | rx_ring = &adapter->rx_ring[i]; | |
2195 | ||
2196 | do { | |
2197 | start = u64_stats_fetch_begin_irq(&rx_ring->syncp); | |
2198 | packets = rx_ring->rx_stats.cnt; | |
2199 | bytes = rx_ring->rx_stats.bytes; | |
2200 | } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); | |
2201 | ||
2202 | stats->rx_packets += packets; | |
2203 | stats->rx_bytes += bytes; | |
2204 | } | |
2205 | ||
2206 | do { | |
2207 | start = u64_stats_fetch_begin_irq(&adapter->syncp); | |
2208 | rx_drops = adapter->dev_stats.rx_drops; | |
2209 | } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); | |
1738cd3e | 2210 | |
d81db240 | 2211 | stats->rx_dropped = rx_drops; |
1738cd3e NB |
2212 | |
2213 | stats->multicast = 0; | |
2214 | stats->collisions = 0; | |
2215 | ||
2216 | stats->rx_length_errors = 0; | |
2217 | stats->rx_crc_errors = 0; | |
2218 | stats->rx_frame_errors = 0; | |
2219 | stats->rx_fifo_errors = 0; | |
2220 | stats->rx_missed_errors = 0; | |
2221 | stats->tx_window_errors = 0; | |
2222 | ||
2223 | stats->rx_errors = 0; | |
2224 | stats->tx_errors = 0; | |
1738cd3e NB |
2225 | } |
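/* The fetch/retry loops above pair with the writer sections used
 * throughout this file; a minimal sketch of the writer side:
 *
 *	u64_stats_update_begin(&tx_ring->syncp);
 *	tx_ring->tx_stats.cnt++;
 *	tx_ring->tx_stats.bytes += skb->len;
 *	u64_stats_update_end(&tx_ring->syncp);
 *
 * On 32-bit kernels the seqcount lets readers detect a torn 64-bit
 * update and retry; on 64-bit kernels begin/end compile to nothing.
 */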
2226 | ||
2227 | static const struct net_device_ops ena_netdev_ops = { | |
2228 | .ndo_open = ena_open, | |
2229 | .ndo_stop = ena_close, | |
2230 | .ndo_start_xmit = ena_start_xmit, | |
2231 | .ndo_select_queue = ena_select_queue, | |
2232 | .ndo_get_stats64 = ena_get_stats64, | |
2233 | .ndo_tx_timeout = ena_tx_timeout, | |
2234 | .ndo_change_mtu = ena_change_mtu, | |
2235 | .ndo_set_mac_address = NULL, | |
2236 | .ndo_validate_addr = eth_validate_addr, | |
2237 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
2238 | .ndo_poll_controller = ena_netpoll, | |
2239 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | |
2240 | }; | |
2241 | ||
2242 | static void ena_device_io_suspend(struct work_struct *work) | |
2243 | { | |
2244 | struct ena_adapter *adapter = | |
2245 | container_of(work, struct ena_adapter, suspend_io_task); | |
2246 | struct net_device *netdev = adapter->netdev; | |
2247 | ||
2248 | /* ena_napi_disable_all disables only the IO handling. | |
2249 | * We are still subject to AENQ keep alive watchdog. | |
2250 | */ | |
2251 | u64_stats_update_begin(&adapter->syncp); | |
2252 | adapter->dev_stats.io_suspend++; | |
2253 | u64_stats_update_end(&adapter->syncp); | |
2254 | ena_napi_disable_all(adapter); | |
2255 | netif_tx_lock(netdev); | |
2256 | netif_device_detach(netdev); | |
2257 | netif_tx_unlock(netdev); | |
2258 | } | |
2259 | ||
2260 | static void ena_device_io_resume(struct work_struct *work) | |
2261 | { | |
2262 | struct ena_adapter *adapter = | |
2263 | container_of(work, struct ena_adapter, resume_io_task); | |
2264 | struct net_device *netdev = adapter->netdev; | |
2265 | ||
2266 | u64_stats_update_begin(&adapter->syncp); | |
2267 | adapter->dev_stats.io_resume++; | |
2268 | u64_stats_update_end(&adapter->syncp); | |
2269 | ||
2270 | netif_device_attach(netdev); | |
2271 | ena_napi_enable_all(adapter); | |
2272 | } | |
2273 | ||
2274 | static int ena_device_validate_params(struct ena_adapter *adapter, | |
2275 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
2276 | { | |
2277 | struct net_device *netdev = adapter->netdev; | |
2278 | int rc; | |
2279 | ||
2280 | rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, | |
2281 | adapter->mac_addr); | |
2282 | if (!rc) { | |
2283 | netif_err(adapter, drv, netdev, | |
2284 | "Error, mac address are different\n"); | |
2285 | return -EINVAL; | |
2286 | } | |
2287 | ||
2288 | if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) || | |
2289 | (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) { | |
2290 | netif_err(adapter, drv, netdev, | |
2291 | "Error, device doesn't support enough queues\n"); | |
2292 | return -EINVAL; | |
2293 | } | |
2294 | ||
2295 | if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { | |
2296 | netif_err(adapter, drv, netdev, | |
2297 | "Error, device max mtu is smaller than netdev MTU\n"); | |
2298 | return -EINVAL; | |
2299 | } | |
2300 | ||
2301 | return 0; | |
2302 | } | |
2303 | ||
2304 | static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, | |
2305 | struct ena_com_dev_get_features_ctx *get_feat_ctx, | |
2306 | bool *wd_state) | |
2307 | { | |
2308 | struct device *dev = &pdev->dev; | |
2309 | bool readless_supported; | |
2310 | u32 aenq_groups; | |
2311 | int dma_width; | |
2312 | int rc; | |
2313 | ||
2314 | rc = ena_com_mmio_reg_read_request_init(ena_dev); | |
2315 | if (rc) { | |
2316 | dev_err(dev, "failed to init mmio read less\n"); | |
2317 | return rc; | |
2318 | } | |
2319 | ||
2320 | /* The PCIe configuration space revision id indicates whether mmio reg | |
2321 | * read is disabled | |
2322 | */ | |
2323 | readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); | |
2324 | ena_com_set_mmio_read_mode(ena_dev, readless_supported); | |
2325 | ||
2326 | rc = ena_com_dev_reset(ena_dev); | |
2327 | if (rc) { | |
2328 | dev_err(dev, "Can not reset device\n"); | |
2329 | goto err_mmio_read_less; | |
2330 | } | |
2331 | ||
2332 | rc = ena_com_validate_version(ena_dev); | |
2333 | if (rc) { | |
2334 | dev_err(dev, "device version is too low\n"); | |
2335 | goto err_mmio_read_less; | |
2336 | } | |
2337 | ||
2338 | dma_width = ena_com_get_dma_width(ena_dev); | |
2339 | if (dma_width < 0) { | |
2340 | dev_err(dev, "Invalid dma width value %d", dma_width); | |
6e22066f | 2341 | rc = dma_width; |
1738cd3e NB |
2342 | goto err_mmio_read_less; |
2343 | } | |
2344 | ||
2345 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width)); | |
2346 | if (rc) { | |
2347 | dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc); | |
2348 | goto err_mmio_read_less; | |
2349 | } | |
2350 | ||
2351 | rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width)); | |
2352 | if (rc) { | |
2353 | dev_err(dev, "err_pci_set_consistent_dma_mask failed 0x%x\n", | |
2354 | rc); | |
2355 | goto err_mmio_read_less; | |
2356 | } | |
2357 | ||
2358 | /* ENA admin level init */ | |
2359 | rc = ena_com_admin_init(ena_dev, &aenq_handlers, true); | |
2360 | if (rc) { | |
2361 | dev_err(dev, | |
2362 | "Can not initialize ena admin queue with device\n"); | |
2363 | goto err_mmio_read_less; | |
2364 | } | |
2365 | ||
2366 | /* To enable the MSI-X interrupts, the driver needs to know the number | |
2367 | * of queues, so it uses polling mode to retrieve this | |
2368 | * information. | |
2369 | */ | |
2370 | ena_com_set_admin_polling_mode(ena_dev, true); | |
2371 | ||
2372 | /* Get Device Attributes */ | |
2373 | rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); | |
2374 | if (rc) { | |
2375 | dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); | |
2376 | goto err_admin_init; | |
2377 | } | |
2378 | ||
2379 | /* Try to turn on all the available AENQ groups */ | |
2380 | aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | | |
2381 | BIT(ENA_ADMIN_FATAL_ERROR) | | |
2382 | BIT(ENA_ADMIN_WARNING) | | |
2383 | BIT(ENA_ADMIN_NOTIFICATION) | | |
2384 | BIT(ENA_ADMIN_KEEP_ALIVE); | |
2385 | ||
2386 | aenq_groups &= get_feat_ctx->aenq.supported_groups; | |
2387 | ||
2388 | rc = ena_com_set_aenq_config(ena_dev, aenq_groups); | |
2389 | if (rc) { | |
2390 | dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); | |
2391 | goto err_admin_init; | |
2392 | } | |
2393 | ||
2394 | *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); | |
2395 | ||
2396 | ena_config_host_info(ena_dev); | |
2397 | ||
2398 | return 0; | |
2399 | ||
2400 | err_admin_init: | |
2401 | ena_com_admin_destroy(ena_dev); | |
2402 | err_mmio_read_less: | |
2403 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
2404 | ||
2405 | return rc; | |
2406 | } | |
2407 | ||
2408 | static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, | |
2409 | int io_vectors) | |
2410 | { | |
2411 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2412 | struct device *dev = &adapter->pdev->dev; | |
2413 | int rc; | |
2414 | ||
2415 | rc = ena_enable_msix(adapter, io_vectors); | |
2416 | if (rc) { | |
2417 | dev_err(dev, "Can not reserve msix vectors\n"); | |
2418 | return rc; | |
2419 | } | |
2420 | ||
2421 | ena_setup_mgmnt_intr(adapter); | |
2422 | ||
2423 | rc = ena_request_mgmnt_irq(adapter); | |
2424 | if (rc) { | |
2425 | dev_err(dev, "Can not setup management interrupts\n"); | |
2426 | goto err_disable_msix; | |
2427 | } | |
2428 | ||
2429 | ena_com_set_admin_polling_mode(ena_dev, false); | |
2430 | ||
2431 | ena_com_admin_aenq_enable(ena_dev); | |
2432 | ||
2433 | return 0; | |
2434 | ||
2435 | err_disable_msix: | |
2436 | ena_disable_msix(adapter); | |
2437 | ||
2438 | return rc; | |
2439 | } | |
2440 | ||
2441 | static void ena_fw_reset_device(struct work_struct *work) | |
2442 | { | |
2443 | struct ena_com_dev_get_features_ctx get_feat_ctx; | |
2444 | struct ena_adapter *adapter = | |
2445 | container_of(work, struct ena_adapter, reset_task); | |
2446 | struct net_device *netdev = adapter->netdev; | |
2447 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2448 | struct pci_dev *pdev = adapter->pdev; | |
2449 | bool dev_up, wd_state; | |
2450 | int rc; | |
2451 | ||
2452 | del_timer_sync(&adapter->timer_service); | |
2453 | ||
2454 | rtnl_lock(); | |
2455 | ||
2456 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2457 | ena_com_set_admin_running_state(ena_dev, false); | |
2458 | ||
2459 | /* After calling ena_close, the tx queues and the napi | |
2460 | * handlers are disabled so no one can interfere with or touch the | |
2461 | * data structures. | |
2462 | */ | |
2463 | ena_close(netdev); | |
2464 | ||
2465 | rc = ena_com_dev_reset(ena_dev); | |
2466 | if (rc) { | |
2467 | dev_err(&pdev->dev, "Device reset failed\n"); | |
2468 | goto err; | |
2469 | } | |
2470 | ||
2471 | ena_free_mgmnt_irq(adapter); | |
2472 | ||
2473 | ena_disable_msix(adapter); | |
2474 | ||
2475 | ena_com_abort_admin_commands(ena_dev); | |
2476 | ||
2477 | ena_com_wait_for_abort_completion(ena_dev); | |
2478 | ||
2479 | ena_com_admin_destroy(ena_dev); | |
2480 | ||
2481 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
2482 | ||
2483 | /* Finish with the destroy part. Start the init part */ | |
2484 | ||
2485 | rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); | |
2486 | if (rc) { | |
2487 | dev_err(&pdev->dev, "Can not initialize device\n"); | |
2488 | goto err; | |
2489 | } | |
2490 | adapter->wd_state = wd_state; | |
2491 | ||
2492 | rc = ena_device_validate_params(adapter, &get_feat_ctx); | |
2493 | if (rc) { | |
2494 | dev_err(&pdev->dev, "Validation of device parameters failed\n"); | |
2495 | goto err_device_destroy; | |
2496 | } | |
2497 | ||
2498 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, | |
2499 | adapter->num_queues); | |
2500 | if (rc) { | |
2501 | dev_err(&pdev->dev, "Enable MSI-X failed\n"); | |
2502 | goto err_device_destroy; | |
2503 | } | |
2504 | /* If the interface was up before the reset, bring it up */ | |
2505 | if (dev_up) { | |
2506 | rc = ena_up(adapter); | |
2507 | if (rc) { | |
2508 | dev_err(&pdev->dev, "Failed to create I/O queues\n"); | |
2509 | goto err_disable_msix; | |
2510 | } | |
2511 | } | |
2512 | ||
2513 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | |
2514 | ||
2515 | rtnl_unlock(); | |
2516 | ||
2517 | dev_err(&pdev->dev, "Device reset completed successfully\n"); | |
2518 | ||
2519 | return; | |
2520 | err_disable_msix: | |
2521 | ena_free_mgmnt_irq(adapter); | |
2522 | ena_disable_msix(adapter); | |
2523 | err_device_destroy: | |
2524 | ena_com_admin_destroy(ena_dev); | |
2525 | err: | |
2526 | rtnl_unlock(); | |
2527 | ||
22b331c9 NB |
2528 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
2529 | ||
1738cd3e NB |
2530 | dev_err(&pdev->dev, |
2531 | "Reset attempt failed. Can not reset the device\n"); | |
2532 | } | |
2533 | ||
2534 | static void check_for_missing_tx_completions(struct ena_adapter *adapter) | |
2535 | { | |
2536 | struct ena_tx_buffer *tx_buf; | |
2537 | unsigned long last_jiffies; | |
2538 | struct ena_ring *tx_ring; | |
2539 | int i, j, budget; | |
2540 | u32 missed_tx; | |
2541 | ||
2542 | /* Make sure the driver isn't turning the device off in another process */ | |
2543 | smp_rmb(); | |
2544 | ||
2545 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
2546 | return; | |
2547 | ||
2548 | budget = ENA_MONITORED_TX_QUEUES; | |
2549 | ||
2550 | for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { | |
2551 | tx_ring = &adapter->tx_ring[i]; | |
2552 | ||
2553 | for (j = 0; j < tx_ring->ring_size; j++) { | |
2554 | tx_buf = &tx_ring->tx_buffer_info[j]; | |
2555 | last_jiffies = tx_buf->last_jiffies; | |
2556 | if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { | |
2557 | netif_notice(adapter, tx_err, adapter->netdev, | |
2558 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | |
2559 | tx_ring->qid, j); | |
2560 | ||
2561 | u64_stats_update_begin(&tx_ring->syncp); | |
2562 | missed_tx = tx_ring->tx_stats.missing_tx_comp++; | |
2563 | u64_stats_update_end(&tx_ring->syncp); | |
2564 | ||
2565 | /* Clear last jiffies so the lost buffer won't | |
2566 | * be counted twice. | |
2567 | */ | |
2568 | tx_buf->last_jiffies = 0; | |
2569 | ||
2570 | if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { | |
2571 | netif_err(adapter, tx_err, adapter->netdev, | |
2572 | "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", | |
2573 | missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); | |
2574 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
2575 | } | |
2576 | } | |
2577 | } | |
2578 | ||
2579 | budget--; | |
2580 | if (!budget) | |
2581 | break; | |
2582 | } | |
2583 | ||
2584 | adapter->last_monitored_tx_qid = i % adapter->num_queues; | |
2585 | } | |
2586 | ||
2587 | /* Check for keep alive expiration */ | |
2588 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | |
2589 | { | |
2590 | unsigned long keep_alive_expired; | |
2591 | ||
2592 | if (!adapter->wd_state) | |
2593 | return; | |
2594 | ||
2595 | keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies | |
2596 | + ENA_DEVICE_KALIVE_TIMEOUT); | |
2597 | if (unlikely(time_is_before_jiffies(keep_alive_expired))) { | |
2598 | netif_err(adapter, drv, adapter->netdev, | |
2599 | "Keep alive watchdog timeout.\n"); | |
2600 | u64_stats_update_begin(&adapter->syncp); | |
2601 | adapter->dev_stats.wd_expired++; | |
2602 | u64_stats_update_end(&adapter->syncp); | |
2603 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
2604 | } | |
2605 | } | |
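/* Illustrative example (timeout value assumed): with HZ = 250 and a
 * keep-alive timeout of 6 * HZ, a device whose last keep-alive AENQ
 * event is more than ~6 seconds old makes time_is_before_jiffies()
 * return true, so the watchdog stat is bumped and a reset is scheduled.
 * The time_is_* macros are wraparound-safe.
 */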
2606 | ||
2607 | static void check_for_admin_com_state(struct ena_adapter *adapter) | |
2608 | { | |
2609 | if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { | |
2610 | netif_err(adapter, drv, adapter->netdev, | |
2611 | "ENA admin queue is not in running state!\n"); | |
2612 | u64_stats_update_begin(&adapter->syncp); | |
2613 | adapter->dev_stats.admin_q_pause++; | |
2614 | u64_stats_update_end(&adapter->syncp); | |
2615 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
2616 | } | |
2617 | } | |
2618 | ||
2619 | static void ena_update_host_info(struct ena_admin_host_info *host_info, | |
2620 | struct net_device *netdev) | |
2621 | { | |
2622 | host_info->supported_network_features[0] = | |
2623 | netdev->features & GENMASK_ULL(31, 0); | |
2624 | host_info->supported_network_features[1] = | |
2625 | (netdev->features & GENMASK_ULL(63, 32)) >> 32; | |
2626 | } | |
2627 | ||
2628 | static void ena_timer_service(unsigned long data) | |
2629 | { | |
2630 | struct ena_adapter *adapter = (struct ena_adapter *)data; | |
2631 | u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; | |
2632 | struct ena_admin_host_info *host_info = | |
2633 | adapter->ena_dev->host_attr.host_info; | |
2634 | ||
2635 | check_for_missing_keep_alive(adapter); | |
2636 | ||
2637 | check_for_admin_com_state(adapter); | |
2638 | ||
2639 | check_for_missing_tx_completions(adapter); | |
2640 | ||
2641 | if (debug_area) | |
2642 | ena_dump_stats_to_buf(adapter, debug_area); | |
2643 | ||
2644 | if (host_info) | |
2645 | ena_update_host_info(host_info, adapter->netdev); | |
2646 | ||
2647 | if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
2648 | netif_err(adapter, drv, adapter->netdev, | |
2649 | "Trigger reset is on\n"); | |
2650 | ena_dump_stats_to_dmesg(adapter); | |
2651 | queue_work(ena_wq, &adapter->reset_task); | |
2652 | return; | |
2653 | } | |
2654 | ||
2655 | /* Reset the timer */ | |
2656 | mod_timer(&adapter->timer_service, jiffies + HZ); | |
2657 | } | |
2658 | ||
2659 | static int ena_calc_io_queue_num(struct pci_dev *pdev, | |
2660 | struct ena_com_dev *ena_dev, | |
2661 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
2662 | { | |
2663 | int io_sq_num, io_queue_num; | |
2664 | ||
2665 | /* In case of LLQ use the llq number in the get feature cmd */ | |
2666 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
2667 | io_sq_num = get_feat_ctx->max_queues.max_llq_num; | |
2668 | ||
2669 | if (io_sq_num == 0) { | |
2670 | dev_err(&pdev->dev, | |
2671 | "Trying to use LLQ but llq_num is 0. Fall back into regular queues\n"); | |
2672 | ||
2673 | ena_dev->tx_mem_queue_type = | |
2674 | ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
2675 | io_sq_num = get_feat_ctx->max_queues.max_sq_num; | |
2676 | } | |
2677 | } else { | |
2678 | io_sq_num = get_feat_ctx->max_queues.max_sq_num; | |
2679 | } | |
2680 | ||
6a1ce2fb | 2681 | io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
1738cd3e NB |
2682 | io_queue_num = min_t(int, io_queue_num, io_sq_num); |
2683 | io_queue_num = min_t(int, io_queue_num, | |
2684 | get_feat_ctx->max_queues.max_cq_num); | |
2685 | /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */ | |
2686 | io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1); | |
2687 | if (unlikely(!io_queue_num)) { | |
2688 | dev_err(&pdev->dev, "The device doesn't have io queues\n"); | |
2689 | return -EFAULT; | |
2690 | } | |
2691 | ||
2692 | return io_queue_num; | |
2693 | } | |
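/* Worked example (device limits assumed for illustration): an 8-CPU
 * host with max_sq_num = 16, max_cq_num = 16 and 9 MSI-X vectors gets
 *
 *	min(8, ENA_MAX_NUM_IO_QUEUES, 16, 16, 9 - 1) = 8 IO queues,
 *
 * i.e. one queue per CPU, leaving one vector for management.
 */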
2694 | ||
184b49c8 RR |
2695 | static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev, |
2696 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
1738cd3e NB |
2697 | { |
2698 | bool has_mem_bar; | |
2699 | ||
2700 | has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); | |
2701 | ||
2702 | /* Enable push mode if device supports LLQ */ | |
2703 | if (has_mem_bar && (get_feat_ctx->max_queues.max_llq_num > 0)) | |
2704 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; | |
2705 | else | |
2706 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
1738cd3e NB |
2707 | } |
2708 | ||
2709 | static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, | |
2710 | struct net_device *netdev) | |
2711 | { | |
2712 | netdev_features_t dev_features = 0; | |
2713 | ||
2714 | /* Set offload features */ | |
2715 | if (feat->offload.tx & | |
2716 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) | |
2717 | dev_features |= NETIF_F_IP_CSUM; | |
2718 | ||
2719 | if (feat->offload.tx & | |
2720 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) | |
2721 | dev_features |= NETIF_F_IPV6_CSUM; | |
2722 | ||
2723 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) | |
2724 | dev_features |= NETIF_F_TSO; | |
2725 | ||
2726 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) | |
2727 | dev_features |= NETIF_F_TSO6; | |
2728 | ||
2729 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) | |
2730 | dev_features |= NETIF_F_TSO_ECN; | |
2731 | ||
2732 | if (feat->offload.rx_supported & | |
2733 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) | |
2734 | dev_features |= NETIF_F_RXCSUM; | |
2735 | ||
2736 | if (feat->offload.rx_supported & | |
2737 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) | |
2738 | dev_features |= NETIF_F_RXCSUM; | |
2739 | ||
2740 | netdev->features = | |
2741 | dev_features | | |
2742 | NETIF_F_SG | | |
1738cd3e NB |
2743 | NETIF_F_RXHASH | |
2744 | NETIF_F_HIGHDMA; | |
2745 | ||
2746 | netdev->hw_features |= netdev->features; | |
2747 | netdev->vlan_features |= netdev->features; | |
2748 | } | |
2749 | ||
2750 | static void ena_set_conf_feat_params(struct ena_adapter *adapter, | |
2751 | struct ena_com_dev_get_features_ctx *feat) | |
2752 | { | |
2753 | struct net_device *netdev = adapter->netdev; | |
2754 | ||
2755 | /* Copy mac address */ | |
2756 | if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { | |
2757 | eth_hw_addr_random(netdev); | |
2758 | ether_addr_copy(adapter->mac_addr, netdev->dev_addr); | |
2759 | } else { | |
2760 | ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); | |
2761 | ether_addr_copy(netdev->dev_addr, adapter->mac_addr); | |
2762 | } | |
2763 | ||
2764 | /* Set offload features */ | |
2765 | ena_set_dev_offloads(feat, netdev); | |
2766 | ||
2767 | adapter->max_mtu = feat->dev_attr.max_mtu; | |
d894be57 JW |
2768 | netdev->max_mtu = adapter->max_mtu; |
2769 | netdev->min_mtu = ENA_MIN_MTU; | |
1738cd3e NB |
2770 | } |
2771 | ||
2772 | static int ena_rss_init_default(struct ena_adapter *adapter) | |
2773 | { | |
2774 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2775 | struct device *dev = &adapter->pdev->dev; | |
2776 | int rc, i; | |
2777 | u32 val; | |
2778 | ||
2779 | rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); | |
2780 | if (unlikely(rc)) { | |
2781 | dev_err(dev, "Cannot init indirect table\n"); | |
2782 | goto err_rss_init; | |
2783 | } | |
2784 | ||
2785 | for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { | |
2786 | val = ethtool_rxfh_indir_default(i, adapter->num_queues); | |
2787 | rc = ena_com_indirect_table_fill_entry(ena_dev, i, | |
2788 | ENA_IO_RXQ_IDX(val)); | |
2789 | if (unlikely(rc && (rc != -EPERM))) { | |
2790 | dev_err(dev, "Cannot fill indirect table\n"); | |
2791 | goto err_fill_indir; | |
2792 | } | |
2793 | } | |
2794 | ||
2795 | rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, | |
2796 | ENA_HASH_KEY_SIZE, 0xFFFFFFFF); | |
2797 | if (unlikely(rc && (rc != -EPERM))) { | |
2798 | dev_err(dev, "Cannot fill hash function\n"); | |
2799 | goto err_fill_indir; | |
2800 | } | |
2801 | ||
2802 | rc = ena_com_set_default_hash_ctrl(ena_dev); | |
2803 | if (unlikely(rc && (rc != -EPERM))) { | |
2804 | dev_err(dev, "Cannot fill hash control\n"); | |
2805 | goto err_fill_indir; | |
2806 | } | |
2807 | ||
2808 | return 0; | |
2809 | ||
2810 | err_fill_indir: | |
2811 | ena_com_rss_destroy(ena_dev); | |
2812 | err_rss_init: | |
2813 | ||
2814 | return rc; | |
2815 | } | |
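/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with
 * num_queues = 4 (assumed for illustration) the indirection table is
 * filled round-robin: entries 0,1,2,3,4,5,... map to RX queues
 * 0,1,2,3,0,1,..., spreading flows evenly across the queues.
 */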
2816 | ||
2817 | static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |
2818 | { | |
2819 | int release_bars; | |
2820 | ||
2821 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | |
2822 | pci_release_selected_regions(pdev, release_bars); | |
2823 | } | |
2824 | ||
2825 | static int ena_calc_queue_size(struct pci_dev *pdev, | |
2826 | struct ena_com_dev *ena_dev, | |
2827 | u16 *max_tx_sgl_size, | |
2828 | u16 *max_rx_sgl_size, | |
2829 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
2830 | { | |
2831 | u32 queue_size = ENA_DEFAULT_RING_SIZE; | |
2832 | ||
2833 | queue_size = min_t(u32, queue_size, | |
2834 | get_feat_ctx->max_queues.max_cq_depth); | |
2835 | queue_size = min_t(u32, queue_size, | |
2836 | get_feat_ctx->max_queues.max_sq_depth); | |
2837 | ||
2838 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | |
2839 | queue_size = min_t(u32, queue_size, | |
2840 | get_feat_ctx->max_queues.max_llq_depth); | |
2841 | ||
2842 | queue_size = rounddown_pow_of_two(queue_size); | |
2843 | ||
2844 | if (unlikely(!queue_size)) { | |
2845 | dev_err(&pdev->dev, "Invalid queue size\n"); | |
2846 | return -EFAULT; | |
2847 | } | |
2848 | ||
2849 | *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
2850 | get_feat_ctx->max_queues.max_packet_tx_descs); | |
2851 | *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
2852 | get_feat_ctx->max_queues.max_packet_rx_descs); | |
2853 | ||
2854 | return queue_size; | |
2855 | } | |
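/* Worked example (device limits assumed for illustration): with
 * ENA_DEFAULT_RING_SIZE = 1024, max_cq_depth = 1024 and
 * max_sq_depth = 600, the minimum is 600 and
 * rounddown_pow_of_two(600) = 512, so 512 is used for both rings.
 */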
2856 | ||
2857 | /* ena_probe - Device Initialization Routine | |
2858 | * @pdev: PCI device information struct | |
2859 | * @ent: entry in ena_pci_tbl | |
2860 | * | |
2861 | * Returns 0 on success, negative on failure | |
2862 | * | |
2863 | * ena_probe initializes an adapter identified by a pci_dev structure. | |
2864 | * The OS initialization, configuring of the adapter private structure, | |
2865 | * and a hardware reset occur. | |
2866 | */ | |
2867 | static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
2868 | { | |
2869 | struct ena_com_dev_get_features_ctx get_feat_ctx; | |
2870 | static int version_printed; | |
2871 | struct net_device *netdev; | |
2872 | struct ena_adapter *adapter; | |
2873 | struct ena_com_dev *ena_dev = NULL; | |
2874 | static int adapters_found; | |
2875 | int io_queue_num, bars, rc; | |
2876 | int queue_size; | |
2877 | u16 tx_sgl_size = 0; | |
2878 | u16 rx_sgl_size = 0; | |
2879 | bool wd_state; | |
2880 | ||
2881 | dev_dbg(&pdev->dev, "%s\n", __func__); | |
2882 | ||
2883 | if (version_printed++ == 0) | |
2884 | dev_info(&pdev->dev, "%s", version); | |
2885 | ||
2886 | rc = pci_enable_device_mem(pdev); | |
2887 | if (rc) { | |
2888 | dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); | |
2889 | return rc; | |
2890 | } | |
2891 | ||
2892 | pci_set_master(pdev); | |
2893 | ||
2894 | ena_dev = vzalloc(sizeof(*ena_dev)); | |
2895 | if (!ena_dev) { | |
2896 | rc = -ENOMEM; | |
2897 | goto err_disable_device; | |
2898 | } | |
2899 | ||
2900 | bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | |
2901 | rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); | |
2902 | if (rc) { | |
2903 | dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", | |
2904 | rc); | |
2905 | goto err_free_ena_dev; | |
2906 | } | |
2907 | ||
2908 | ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), | |
2909 | pci_resource_len(pdev, ENA_REG_BAR)); | |
2910 | if (!ena_dev->reg_bar) { | |
2911 | dev_err(&pdev->dev, "failed to remap regs bar\n"); | |
2912 | rc = -EFAULT; | |
2913 | goto err_free_region; | |
2914 | } | |
2915 | ||
2916 | ena_dev->dmadev = &pdev->dev; | |
2917 | ||
2918 | rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); | |
2919 | if (rc) { | |
2920 | dev_err(&pdev->dev, "ena device init failed\n"); | |
2921 | if (rc == -ETIME) | |
2922 | rc = -EPROBE_DEFER; | |
2923 | goto err_free_region; | |
2924 | } | |
2925 | ||
184b49c8 | 2926 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); |
1738cd3e NB |
2927 | |
2928 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
2929 | ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), | |
2930 | pci_resource_len(pdev, ENA_MEM_BAR)); | |
2931 | if (!ena_dev->mem_bar) { | |
2932 | rc = -EFAULT; | |
2933 | goto err_device_destroy; | |
2934 | } | |
2935 | } | |
2936 | ||
2937 | /* Initial TX interrupt delay; assumes 1 usec granularity. | |
2938 | * Updated during device initialization with the real granularity. | |
2939 | */ | |
2940 | ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; | |
2941 | io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx); | |
2942 | queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size, | |
2943 | &rx_sgl_size, &get_feat_ctx); | |
2944 | if ((queue_size <= 0) || (io_queue_num <= 0)) { | |
2945 | rc = -EFAULT; | |
2946 | goto err_device_destroy; | |
2947 | } | |
2948 | ||
2949 | dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n", | |
2950 | io_queue_num, queue_size); | |
2951 | ||
2952 | /* dev zeroed in alloc_etherdev_mq */ | |
2953 | netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num); | |
2954 | if (!netdev) { | |
2955 | dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); | |
2956 | rc = -ENOMEM; | |
2957 | goto err_device_destroy; | |
2958 | } | |
2959 | ||
2960 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
2961 | ||
2962 | adapter = netdev_priv(netdev); | |
2963 | pci_set_drvdata(pdev, adapter); | |
2964 | ||
2965 | adapter->ena_dev = ena_dev; | |
2966 | adapter->netdev = netdev; | |
2967 | adapter->pdev = pdev; | |
2968 | ||
2969 | ena_set_conf_feat_params(adapter, &get_feat_ctx); | |
2970 | ||
2971 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); | |
2972 | ||
2973 | adapter->tx_ring_size = queue_size; | |
2974 | adapter->rx_ring_size = queue_size; | |
2975 | ||
2976 | adapter->max_tx_sgl_size = tx_sgl_size; | |
2977 | adapter->max_rx_sgl_size = rx_sgl_size; | |
2978 | ||
2979 | adapter->num_queues = io_queue_num; | |
2980 | adapter->last_monitored_tx_qid = 0; | |
2981 | ||
2982 | adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; | |
2983 | adapter->wd_state = wd_state; | |
2984 | ||
2985 | snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); | |
2986 | ||
2987 | rc = ena_com_init_interrupt_moderation(adapter->ena_dev); | |
2988 | if (rc) { | |
2989 | dev_err(&pdev->dev, | |
2990 | "Failed to query interrupt moderation feature\n"); | |
2991 | goto err_netdev_destroy; | |
2992 | } | |
2993 | ena_init_io_rings(adapter); | |
2994 | ||
2995 | netdev->netdev_ops = &ena_netdev_ops; | |
2996 | netdev->watchdog_timeo = TX_TIMEOUT; | |
2997 | ena_set_ethtool_ops(netdev); | |
2998 | ||
2999 | netdev->priv_flags |= IFF_UNICAST_FLT; | |
3000 | ||
3001 | u64_stats_init(&adapter->syncp); | |
3002 | ||
3003 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); | |
3004 | if (rc) { | |
3005 | dev_err(&pdev->dev, | |
3006 | "Failed to enable and set the admin interrupts\n"); | |
3007 | goto err_worker_destroy; | |
3008 | } | |
3009 | rc = ena_rss_init_default(adapter); | |
3010 | if (rc && (rc != -EPERM)) { | |
3011 | dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); | |
3012 | goto err_free_msix; | |
3013 | } | |
3014 | ||
3015 | ena_config_debug_area(adapter); | |
3016 | ||
3017 | memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); | |
3018 | ||
3019 | netif_carrier_off(netdev); | |
3020 | ||
3021 | rc = register_netdev(netdev); | |
3022 | if (rc) { | |
3023 | dev_err(&pdev->dev, "Cannot register net device\n"); | |
3024 | goto err_rss; | |
3025 | } | |
3026 | ||
3027 | INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend); | |
3028 | INIT_WORK(&adapter->resume_io_task, ena_device_io_resume); | |
3029 | INIT_WORK(&adapter->reset_task, ena_fw_reset_device); | |
3030 | ||
3031 | adapter->last_keep_alive_jiffies = jiffies; | |
3032 | ||
f850b4a7 WY |
3033 | setup_timer(&adapter->timer_service, ena_timer_service, |
3034 | (unsigned long)adapter); | |
3035 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | |
1738cd3e NB |
3036 | |
3037 | dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n", | |
3038 | DEVICE_NAME, (long)pci_resource_start(pdev, 0), | |
3039 | netdev->dev_addr, io_queue_num); | |
3040 | ||
3041 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | |
3042 | ||
3043 | adapters_found++; | |
3044 | ||
3045 | return 0; | |
3046 | ||
3047 | err_rss: | |
3048 | ena_com_delete_debug_area(ena_dev); | |
3049 | ena_com_rss_destroy(ena_dev); | |
3050 | err_free_msix: | |
3051 | ena_com_dev_reset(ena_dev); | |
3052 | ena_free_mgmnt_irq(adapter); | |
3053 | ena_disable_msix(adapter); | |
3054 | err_worker_destroy: | |
3055 | ena_com_destroy_interrupt_moderation(ena_dev); | |
3056 | del_timer(&adapter->timer_service); | |
3057 | cancel_work_sync(&adapter->suspend_io_task); | |
3058 | cancel_work_sync(&adapter->resume_io_task); | |
3059 | err_netdev_destroy: | |
3060 | free_netdev(netdev); | |
3061 | err_device_destroy: | |
3062 | ena_com_delete_host_info(ena_dev); | |
3063 | ena_com_admin_destroy(ena_dev); | |
3064 | err_free_region: | |
3065 | ena_release_bars(ena_dev, pdev); | |
3066 | err_free_ena_dev: | |
1738cd3e NB |
3067 | vfree(ena_dev); |
3068 | err_disable_device: | |
3069 | pci_disable_device(pdev); | |
3070 | return rc; | |
3071 | } | |
3072 | ||
3073 | /*****************************************************************************/ | |
3074 | static int ena_sriov_configure(struct pci_dev *dev, int numvfs) | |
3075 | { | |
3076 | int rc; | |
3077 | ||
3078 | if (numvfs > 0) { | |
3079 | rc = pci_enable_sriov(dev, numvfs); | |
3080 | if (rc != 0) { | |
3081 | dev_err(&dev->dev, | |
3082 | "pci_enable_sriov failed to enable: %d vfs with the error: %d\n", | |
3083 | numvfs, rc); | |
3084 | return rc; | |
3085 | } | |
3086 | ||
3087 | return numvfs; | |
3088 | } | |
3089 | ||
3090 | if (numvfs == 0) { | |
3091 | pci_disable_sriov(dev); | |
3092 | return 0; | |
3093 | } | |
3094 | ||
3095 | return -EINVAL; | |
3096 | } | |
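/* This callback is reached through the generic PCI sysfs interface;
 * for example (illustrative, the BDF is a placeholder):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs   # enable 4 VFs
 *	echo 0 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs   # disable VFs
 */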
3097 | ||
3098 | /*****************************************************************************/ | |
3099 | /*****************************************************************************/ | |
3100 | ||
3101 | /* ena_remove - Device Removal Routine | |
3102 | * @pdev: PCI device information struct | |
3103 | * | |
3104 | * ena_remove is called by the PCI subsystem to alert the driver | |
3105 | * that it should release a PCI device. | |
3106 | */ | |
3107 | static void ena_remove(struct pci_dev *pdev) | |
3108 | { | |
3109 | struct ena_adapter *adapter = pci_get_drvdata(pdev); | |
3110 | struct ena_com_dev *ena_dev; | |
3111 | struct net_device *netdev; | |
3112 | ||
3113 | if (!adapter) | |
3114 | /* This device didn't load properly and its resources were | |
3115 | * already released, nothing to do | |
3116 | */ | |
3117 | return; | |
3118 | ||
3119 | ena_dev = adapter->ena_dev; | |
3120 | netdev = adapter->netdev; | |
3121 | ||
3122 | #ifdef CONFIG_RFS_ACCEL | |
3123 | if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { | |
3124 | free_irq_cpu_rmap(netdev->rx_cpu_rmap); | |
3125 | netdev->rx_cpu_rmap = NULL; | |
3126 | } | |
3127 | #endif /* CONFIG_RFS_ACCEL */ | |
3128 | ||
3129 | unregister_netdev(netdev); | |
3130 | del_timer_sync(&adapter->timer_service); | |
3131 | ||
3132 | cancel_work_sync(&adapter->reset_task); | |
3133 | ||
3134 | cancel_work_sync(&adapter->suspend_io_task); | |
3135 | ||
3136 | cancel_work_sync(&adapter->resume_io_task); | |
3137 | ||
22b331c9 NB |
3138 | /* Reset the device only if the device is running. */ |
3139 | if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | |
3140 | ena_com_dev_reset(ena_dev); | |
1738cd3e NB |
3141 | |
3142 | ena_free_mgmnt_irq(adapter); | |
3143 | ||
3144 | ena_disable_msix(adapter); | |
3145 | ||
3146 | free_netdev(netdev); | |
3147 | ||
3148 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
3149 | ||
3150 | ena_com_abort_admin_commands(ena_dev); | |
3151 | ||
3152 | ena_com_wait_for_abort_completion(ena_dev); | |
3153 | ||
3154 | ena_com_admin_destroy(ena_dev); | |
3155 | ||
3156 | ena_com_rss_destroy(ena_dev); | |
3157 | ||
3158 | ena_com_delete_debug_area(ena_dev); | |
3159 | ||
3160 | ena_com_delete_host_info(ena_dev); | |
3161 | ||
3162 | ena_release_bars(ena_dev, pdev); | |
3163 | ||
1738cd3e NB |
3164 | pci_disable_device(pdev); |
3165 | ||
3166 | ena_com_destroy_interrupt_moderation(ena_dev); | |
3167 | ||
3168 | vfree(ena_dev); | |
3169 | } | |
3170 | ||
3171 | static struct pci_driver ena_pci_driver = { | |
3172 | .name = DRV_MODULE_NAME, | |
3173 | .id_table = ena_pci_tbl, | |
3174 | .probe = ena_probe, | |
3175 | .remove = ena_remove, | |
3176 | .sriov_configure = ena_sriov_configure, | |
3177 | }; | |
3178 | ||
3179 | static int __init ena_init(void) | |
3180 | { | |
3181 | pr_info("%s", version); | |
3182 | ||
3183 | ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); | |
3184 | if (!ena_wq) { | |
3185 | pr_err("Failed to create workqueue\n"); | |
3186 | return -ENOMEM; | |
3187 | } | |
3188 | ||
3189 | return pci_register_driver(&ena_pci_driver); | |
3190 | } | |
3191 | ||
3192 | static void __exit ena_cleanup(void) | |
3193 | { | |
3194 | pci_unregister_driver(&ena_pci_driver); | |
3195 | ||
3196 | if (ena_wq) { | |
3197 | destroy_workqueue(ena_wq); | |
3198 | ena_wq = NULL; | |
3199 | } | |
3200 | } | |
3201 | ||
3202 | /****************************************************************************** | |
3203 | ******************************** AENQ Handlers ******************************* | |
3204 | *****************************************************************************/ | |
3205 | /* ena_update_on_link_change: | |
3206 | * Notify the network interface about the change in link status | |
3207 | */ | |
3208 | static void ena_update_on_link_change(void *adapter_data, | |
3209 | struct ena_admin_aenq_entry *aenq_e) | |
3210 | { | |
3211 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
3212 | struct ena_admin_aenq_link_change_desc *aenq_desc = | |
3213 | (struct ena_admin_aenq_link_change_desc *)aenq_e; | |
3214 | int status = aenq_desc->flags & | |
3215 | ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; | |
3216 | ||
3217 | if (status) { | |
3218 | netdev_dbg(adapter->netdev, "%s\n", __func__); | |
3219 | set_bit(ENA_FLAG_LINK_UP, &adapter->flags); | |
3220 | netif_carrier_on(adapter->netdev); | |
3221 | } else { | |
3222 | clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); | |
3223 | netif_carrier_off(adapter->netdev); | |
3224 | } | |
3225 | } | |
3226 | ||
3227 | static void ena_keep_alive_wd(void *adapter_data, | |
3228 | struct ena_admin_aenq_entry *aenq_e) | |
3229 | { | |
3230 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
3231 | ||
3232 | adapter->last_keep_alive_jiffies = jiffies; | |
3233 | } | |
3234 | ||
3235 | static void ena_notification(void *adapter_data, | |
3236 | struct ena_admin_aenq_entry *aenq_e) | |
3237 | { | |
3238 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
3239 | ||
3240 | WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, | |
3241 | "Invalid group(%x) expected %x\n", | |
3242 | aenq_e->aenq_common_desc.group, | |
3243 | ENA_ADMIN_NOTIFICATION); | |
3244 | ||
3245 | switch (aenq_e->aenq_common_desc.syndrom) { | |
3246 | case ENA_ADMIN_SUSPEND: | |
3247 | /* Suspend just the IO queues. | |
3248 | * We deliberately don't suspend admin so the timer and | |
3249 | * the keep_alive events should remain. | |
3250 | */ | |
3251 | queue_work(ena_wq, &adapter->suspend_io_task); | |
3252 | break; | |
3253 | case ENA_ADMIN_RESUME: | |
3254 | queue_work(ena_wq, &adapter->resume_io_task); | |
3255 | break; | |
3256 | default: | |
3257 | netif_err(adapter, drv, adapter->netdev, | |
3258 | "Invalid aenq notification link state %d\n", | |
3259 | aenq_e->aenq_common_desc.syndrom); | |
3260 | } | |
3261 | } | |
3262 | ||
3263 | /* This handler will be called for an unknown event group or unimplemented handlers */ | |
3264 | static void unimplemented_aenq_handler(void *data, | |
3265 | struct ena_admin_aenq_entry *aenq_e) | |
3266 | { | |
3267 | struct ena_adapter *adapter = (struct ena_adapter *)data; | |
3268 | ||
3269 | netif_err(adapter, drv, adapter->netdev, | |
3270 | "Unknown event was received or event with unimplemented handler\n"); | |
3271 | } | |
3272 | ||
3273 | static struct ena_aenq_handlers aenq_handlers = { | |
3274 | .handlers = { | |
3275 | [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, | |
3276 | [ENA_ADMIN_NOTIFICATION] = ena_notification, | |
3277 | [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, | |
3278 | }, | |
3279 | .unimplemented_handler = unimplemented_aenq_handler | |
3280 | }; | |
3281 | ||
3282 | module_init(ena_init); | |
3283 | module_exit(ena_cleanup); |