// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
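
/* A debug value of -1 leaves the message mask at DEFAULT_MSG_ENABLE
 * (per the usual netif_msg_init() convention, applied elsewhere in the
 * driver); any other value is interpreted as a NETIF_MSG_* bitmask.
 */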

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Bail out if a reset is already in progress or has already
	 * been triggered.
	 */

	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit timeout\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

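/* ena_xmit_common - push a packet's descriptors to the device
 *
 * Common TX submission path shared by the skb and XDP transmit
 * routines: it writes the LLQ doorbell first if the queue reached its
 * max burst size, treats a queue-full (-ENOMEM) result from
 * ena_com_prepare_tx() as a plain drop, and escalates any other
 * failure to a device reset.
 */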
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* Prepare the packet's descriptors for the DMA engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

/* This is the XDP napi callback. XDP queues use a NAPI callback
 * separate from the one used by the Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

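/* Map an XDP frame for transmission: up to tx_max_header_size bytes
 * are handed to the device as an LLQ push header, and any remainder
 * is DMA-mapped as a single linear buffer.
 */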
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

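/* Transmit a single XDP_TX frame on the XDP TX ring paired with the
 * originating RX queue. An extra reference is taken on the RX page so
 * it is only released once the TX completion for this frame arrives.
 */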
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* Trigger the DMA engine. ena_com_write_sq_doorbell()
	 * includes a memory barrier (mb).
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	__free_page(tx_info->xdp_rx_page);
	return NETDEV_TX_OK;
}

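/* Run the attached XDP program (if any) on a received buffer and
 * account its verdict; XDP_TX frames are retransmitted on the XDP
 * ring that corresponds to the RX queue they arrived on.
 */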
static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;
	u64 *xdp_stat;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX) {
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);

		xdp_stat = &rx_ring->rx_stats.xdp_tx;
	} else if (unlikely(verdict == XDP_ABORTED)) {
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
	} else if (unlikely(verdict == XDP_DROP)) {
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
	} else if (unlikely(verdict == XDP_PASS)) {
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
	} else {
		bpf_warn_invalid_xdp_action(verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	(*xdp_stat)++;
	u64_stats_update_end(&rx_ring->syncp);
out:
	rcu_read_unlock();

	return verdict;
}

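/* XDP TX rings are laid out directly after the regular IO rings, so
 * XDP ring i services RX queue (i - xdp_first_ring).
 */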
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

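/* Attach or detach an XDP program. The queues are torn down and
 * brought back up only when crossing the no-program/program boundary,
 * and max_mtu is capped to ENA_XDP_MAX_MTU while a program is loaded.
 */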
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;
	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* Allocate an extra element so the rx path can always prefetch
	 * rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* restore page offset value in case it has been changed by device */
	rx_info->page_offset = headroom;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the NIC as well
	 */
	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom;

	return 0;
}

static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_BIDIRECTIONAL);

	__free_page(page);
	rx_info->page = NULL;
}

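/* Post up to @num fresh RX buffers to the submission queue and ring
 * the doorbell. Returns the number actually posted, which can be
 * smaller if page allocation or descriptor submission fails.
 */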
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling queue %d failed. Allocated %d buffers out of %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

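/* Undo the DMA mappings of a TX buffer: the optional linear part
 * first (if map_linear_data is set), then any remaining page
 * fragments.
 */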
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

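/* A completion carrying a req_id with no matching skb/xdp frame means
 * driver and device disagree about the ring state; the only safe
 * recovery is to schedule a device reset.
 */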
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

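/* Reclaim up to @budget TX completions: unmap and free each completed
 * skb, return its req_id to free_ids, and re-wake the netdev queue
 * once enough descriptors are available again.
 */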
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

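/* Build an skb for a received packet. Packets up to rx_copybreak are
 * copied into a small linear skb, leaving the page in place for
 * reuse; larger packets attach their pages to the skb as frags.
 */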
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;

	prefetch(va);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IPv4 and checksum error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))

			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

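/* Set up the xdp_buff around the first RX descriptor and run the XDP
 * program on it; packets larger than ENA_XDP_MAX_MTU are dropped
 * before the program runs.
 */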
32109c70 | 1516 | static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) |
838c93dc SJ |
1517 | { |
1518 | struct ena_rx_buffer *rx_info; | |
1519 | int ret; | |
1520 | ||
1521 | rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; | |
1396d314 | 1522 | xdp->data = page_address(rx_info->page) + rx_info->page_offset; |
838c93dc SJ |
1523 | xdp_set_data_meta_invalid(xdp); |
1524 | xdp->data_hard_start = page_address(rx_info->page); | |
1525 | xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len; | |
1526 | /* If for some reason we received a bigger packet than | |
1527 | * we expect, then we simply drop it | |
1528 | */ | |
1529 | if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) | |
1530 | return XDP_DROP; | |
1531 | ||
548c4940 | 1532 | ret = ena_xdp_execute(rx_ring, xdp, rx_info); |
838c93dc SJ |
1533 | |
1534 | /* The xdp program might expand the headers */ | |
1535 | if (ret == XDP_PASS) { | |
1536 | rx_info->page_offset = xdp->data - xdp->data_hard_start; | |
1537 | rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; | |
1538 | } | |
1539 | ||
1540 | return ret; | |
1541 | } | |
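/* Editorial note: on XDP_PASS the offsets are written back above so the skb
 * later built by ena_rx_skb() sees any header adjustment the program made
 * (e.g. via bpf_xdp_adjust_head()); the remaining verdicts are resolved by
 * the caller, ena_clean_rx_irq().
 */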
1738cd3e NB |
1542 | /* ena_clean_rx_irq - Cleanup RX irq |
1543 | * @rx_ring: RX ring to clean | |
1544 | * @napi: napi handler | |
1545 | * @budget: how many packets the driver is allowed to clean | |
1546 | * | |
1547 | * Returns the number of cleaned packets. | |
1548 | */ | |
1549 | static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, | |
1550 | u32 budget) | |
1551 | { | |
1552 | u16 next_to_clean = rx_ring->next_to_clean; | |
1738cd3e | 1553 | struct ena_com_rx_ctx ena_rx_ctx; |
68f236df | 1554 | struct ena_rx_buffer *rx_info; |
1738cd3e | 1555 | struct ena_adapter *adapter; |
548c4940 | 1556 | u32 res_budget, work_done; |
838c93dc SJ |
1557 | int rx_copybreak_pkt = 0; |
1558 | int refill_threshold; | |
1738cd3e NB |
1559 | struct sk_buff *skb; |
1560 | int refill_required; | |
838c93dc | 1561 | struct xdp_buff xdp; |
1738cd3e | 1562 | int total_len = 0; |
838c93dc SJ |
1563 | int xdp_verdict; |
1564 | int rc = 0; | |
ad974bae | 1565 | int i; |
1738cd3e NB |
1566 | |
1567 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, | |
1568 | "%s qid %d\n", __func__, rx_ring->qid); | |
1569 | res_budget = budget; | |
838c93dc | 1570 | xdp.rxq = &rx_ring->xdp_rxq; |
08fc1cfd | 1571 | xdp.frame_sz = ENA_PAGE_SIZE; |
548c4940 | 1572 | |
1738cd3e | 1573 | do { |
838c93dc SJ |
1574 | xdp_verdict = XDP_PASS; |
1575 | skb = NULL; | |
1738cd3e NB |
1576 | ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; |
1577 | ena_rx_ctx.max_bufs = rx_ring->sgl_size; | |
1578 | ena_rx_ctx.descs = 0; | |
68f236df | 1579 | ena_rx_ctx.pkt_offset = 0; |
1738cd3e NB |
1580 | rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, |
1581 | rx_ring->ena_com_io_sq, | |
1582 | &ena_rx_ctx); | |
1583 | if (unlikely(rc)) | |
1584 | goto error; | |
1585 | ||
1586 | if (unlikely(ena_rx_ctx.descs == 0)) | |
1587 | break; | |
1588 | ||
1396d314 | 1589 | /* First descriptor might have an offset set by the device */ |
68f236df | 1590 | rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; |
1396d314 | 1591 | rx_info->page_offset += ena_rx_ctx.pkt_offset; |
68f236df | 1592 | |
1738cd3e NB |
1593 | netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, |
1594 | "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", | |
1595 | rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, | |
1596 | ena_rx_ctx.l4_proto, ena_rx_ctx.hash); | |
1597 | ||
838c93dc SJ |
1598 | if (ena_xdp_present_ring(rx_ring)) |
1599 | xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); | |
1600 | ||
1738cd3e | 1601 | /* allocate skb and fill it */ |
838c93dc SJ |
1602 | if (xdp_verdict == XDP_PASS) |
1603 | skb = ena_rx_skb(rx_ring, | |
1604 | rx_ring->ena_bufs, | |
1605 | ena_rx_ctx.descs, | |
1606 | &next_to_clean); | |
1738cd3e | 1607 | |
1738cd3e | 1608 | if (unlikely(!skb)) { |
bf2746e8 SA |
1609 | /* The page might not actually be freed here since the |
1610 | * page reference count is incremented in | |
1611 | * ena_xdp_xmit_buff(), and it will be decreased only | |
1612 | * when a send completion is received from the device | |
1613 | */ | |
3921a81c | 1614 | if (xdp_verdict == XDP_TX) |
548c4940 SJ |
1615 | ena_free_rx_page(rx_ring, |
1616 | &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]); | |
ad974bae | 1617 | for (i = 0; i < ena_rx_ctx.descs; i++) { |
f9172498 | 1618 | rx_ring->free_ids[next_to_clean] = |
ad974bae NB |
1619 | rx_ring->ena_bufs[i].req_id; |
1620 | next_to_clean = | |
1621 | ENA_RX_RING_IDX_NEXT(next_to_clean, | |
1622 | rx_ring->ring_size); | |
1623 | } | |
3921a81c SJ |
1624 | if (xdp_verdict != XDP_PASS) { |
1625 | res_budget--; | |
838c93dc | 1626 | continue; |
3921a81c | 1627 | } |
1738cd3e NB |
1628 | break; |
1629 | } | |
1630 | ||
1631 | ena_rx_checksum(rx_ring, &ena_rx_ctx, skb); | |
1632 | ||
1633 | ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb); | |
1634 | ||
1635 | skb_record_rx_queue(skb, rx_ring->qid); | |
1636 | ||
1637 | if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { | |
1638 | total_len += rx_ring->ena_bufs[0].len; | |
1639 | rx_copybreak_pkt++; | |
1640 | napi_gro_receive(napi, skb); | |
1641 | } else { | |
1642 | total_len += skb->len; | |
1643 | napi_gro_frags(napi); | |
1644 | } | |
1645 | ||
1646 | res_budget--; | |
1647 | } while (likely(res_budget)); | |
1648 | ||
1649 | work_done = budget - res_budget; | |
1738cd3e NB |
1650 | rx_ring->per_napi_packets += work_done; |
1651 | u64_stats_update_begin(&rx_ring->syncp); | |
1652 | rx_ring->rx_stats.bytes += total_len; | |
1653 | rx_ring->rx_stats.cnt += work_done; | |
1654 | rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; | |
1655 | u64_stats_update_end(&rx_ring->syncp); | |
1656 | ||
1657 | rx_ring->next_to_clean = next_to_clean; | |
1658 | ||
7cfe9a55 | 1659 | refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); |
0574bb80 AK |
1660 | refill_threshold = |
1661 | min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, | |
1662 | ENA_RX_REFILL_THRESH_PACKET); | |
1738cd3e NB |
1663 | |
1664 | /* Optimization, try to batch new rx buffers */ | |
1665 | if (refill_required > refill_threshold) { | |
1666 | ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); | |
1667 | ena_refill_rx_bufs(rx_ring, refill_required); | |
1668 | } | |
1669 | ||
1670 | return work_done; | |
1671 | ||
1672 | error: | |
1673 | adapter = netdev_priv(rx_ring->netdev); | |
1674 | ||
5b7022cf SA |
1675 | if (rc == -ENOSPC) { |
1676 | u64_stats_update_begin(&rx_ring->syncp); | |
1677 | rx_ring->rx_stats.bad_desc_num++; | |
1678 | u64_stats_update_end(&rx_ring->syncp); | |
1679 | adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS; | |
1680 | } else { | |
1681 | u64_stats_update_begin(&rx_ring->syncp); | |
1682 | rx_ring->rx_stats.bad_req_id++; | |
1683 | u64_stats_update_end(&rx_ring->syncp); | |
1684 | adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID; | |
1685 | } | |
1738cd3e | 1686 | |
1738cd3e NB |
1687 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
1688 | ||
1689 | return 0; | |
1690 | } | |
1691 | ||
282faf61 | 1692 | static void ena_dim_work(struct work_struct *w) |
1738cd3e | 1693 | { |
282faf61 AK |
1694 | struct dim *dim = container_of(w, struct dim, work); |
1695 | struct dim_cq_moder cur_moder = | |
1696 | net_dim_get_rx_moderation(dim->mode, dim->profile_ix); | |
1697 | struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim); | |
1698 | ||
1699 | ena_napi->rx_ring->smoothed_interval = cur_moder.usec; | |
1700 | dim->state = DIM_START_MEASURE; | |
1701 | } | |
1702 | ||
1703 | static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi) | |
1704 | { | |
1705 | struct dim_sample dim_sample; | |
1706 | struct ena_ring *rx_ring = ena_napi->rx_ring; | |
1707 | ||
1708 | if (!rx_ring->per_napi_packets) | |
1709 | return; | |
1710 | ||
1711 | rx_ring->non_empty_napi_events++; | |
1712 | ||
1713 | dim_update_sample(rx_ring->non_empty_napi_events, | |
1714 | rx_ring->rx_stats.cnt, | |
1715 | rx_ring->rx_stats.bytes, | |
1716 | &dim_sample); | |
1717 | ||
1718 | net_dim(&ena_napi->dim, dim_sample); | |
1719 | ||
1738cd3e | 1720 | rx_ring->per_napi_packets = 0; |
1738cd3e NB |
1721 | } |
1722 | ||
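/* Editorial sketch of the adaptive-moderation flow: ena_io_poll() feeds an
 * {events, packets, bytes} sample to net_dim() via the helper above; when
 * the kernel DIM library picks a new profile it schedules dim->work, i.e.
 * ena_dim_work(), which caches the profile's usec interval in
 * rx_ring->smoothed_interval, and ena_unmask_interrupt() below writes that
 * interval to the device together with the unmask.
 */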
c2b54204 | 1723 | static void ena_unmask_interrupt(struct ena_ring *tx_ring, |
418df30f NB |
1724 | struct ena_ring *rx_ring) |
1725 | { | |
1726 | struct ena_eth_io_intr_reg intr_reg; | |
548c4940 SJ |
1727 | u32 rx_interval = 0; |
1728 | /* Rx ring can be NULL for XDP tx queues, which don't have an | |
1729 | * accompanying rx_ring pair. | |
1730 | */ | |
1731 | if (rx_ring) | |
1732 | rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? | |
1733 | rx_ring->smoothed_interval : | |
1734 | ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); | |
418df30f NB |
1735 | |
1736 | /* Update intr register: rx intr delay, | |
1737 | * tx intr delay and interrupt unmask | |
1738 | */ | |
1739 | ena_com_update_intr_reg(&intr_reg, | |
7b8a2878 | 1740 | rx_interval, |
418df30f NB |
1741 | tx_ring->smoothed_interval, |
1742 | true); | |
1743 | ||
d4a8b3bb SJ |
1744 | u64_stats_update_begin(&tx_ring->syncp); |
1745 | tx_ring->tx_stats.unmask_interrupt++; | |
1746 | u64_stats_update_end(&tx_ring->syncp); | |
bf2746e8 | 1747 | |
418df30f NB |
1748 | /* It is a shared MSI-X vector. | |
1749 | * Tx and Rx CQ both have a pointer to it, | |
1750 | * so we use one of them to reach the intr reg. | |
548c4940 | 1751 | * The Tx ring is used because rx_ring is NULL for XDP queues. | |
418df30f | 1752 | */ |
548c4940 | 1753 | ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); |
418df30f NB |
1754 | } |
1755 | ||
c2b54204 | 1756 | static void ena_update_ring_numa_node(struct ena_ring *tx_ring, |
1738cd3e NB |
1757 | struct ena_ring *rx_ring) |
1758 | { | |
1759 | int cpu = get_cpu(); | |
1760 | int numa_node; | |
1761 | ||
1762 | /* Check only one ring since the 2 rings are running on the same cpu */ | |
1763 | if (likely(tx_ring->cpu == cpu)) | |
1764 | goto out; | |
1765 | ||
1766 | numa_node = cpu_to_node(cpu); | |
1767 | put_cpu(); | |
1768 | ||
1769 | if (numa_node != NUMA_NO_NODE) { | |
1770 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); | |
548c4940 SJ |
1771 | if (rx_ring) |
1772 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, | |
1773 | numa_node); | |
1738cd3e NB |
1774 | } |
1775 | ||
1776 | tx_ring->cpu = cpu; | |
548c4940 SJ |
1777 | if (rx_ring) |
1778 | rx_ring->cpu = cpu; | |
1738cd3e NB |
1779 | |
1780 | return; | |
1781 | out: | |
1782 | put_cpu(); | |
1783 | } | |
1784 | ||
548c4940 SJ |
1785 | static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget) |
1786 | { | |
1787 | u32 total_done = 0; | |
1788 | u16 next_to_clean; | |
1789 | u32 tx_bytes = 0; | |
1790 | int tx_pkts = 0; | |
1791 | u16 req_id; | |
1792 | int rc; | |
1793 | ||
1794 | if (unlikely(!xdp_ring)) | |
1795 | return 0; | |
1796 | next_to_clean = xdp_ring->next_to_clean; | |
1797 | ||
1798 | while (tx_pkts < budget) { | |
1799 | struct ena_tx_buffer *tx_info; | |
1800 | struct xdp_frame *xdpf; | |
1801 | ||
1802 | rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq, | |
1803 | &req_id); | |
1804 | if (rc) | |
1805 | break; | |
1806 | ||
1807 | rc = validate_xdp_req_id(xdp_ring, req_id); | |
1808 | if (rc) | |
1809 | break; | |
1810 | ||
1811 | tx_info = &xdp_ring->tx_buffer_info[req_id]; | |
1812 | xdpf = tx_info->xdpf; | |
1813 | ||
1814 | tx_info->xdpf = NULL; | |
1815 | tx_info->last_jiffies = 0; | |
1816 | ena_unmap_tx_buff(xdp_ring, tx_info); | |
1817 | ||
1818 | netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, | |
1819 | "tx_poll: q %d skb %p completed\n", xdp_ring->qid, | |
1820 | xdpf); | |
1821 | ||
1822 | tx_bytes += xdpf->len; | |
1823 | tx_pkts++; | |
1824 | total_done += tx_info->tx_descs; | |
1825 | ||
1826 | __free_page(tx_info->xdp_rx_page); | |
1827 | xdp_ring->free_ids[next_to_clean] = req_id; | |
1828 | next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean, | |
1829 | xdp_ring->ring_size); | |
1830 | } | |
1831 | ||
1832 | xdp_ring->next_to_clean = next_to_clean; | |
1833 | ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done); | |
1834 | ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq); | |
1835 | ||
1836 | netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev, | |
1837 | "tx_poll: q %d done. total pkts: %d\n", | |
1838 | xdp_ring->qid, tx_pkts); | |
1839 | ||
1840 | return tx_pkts; | |
1841 | } | |
1842 | ||
1738cd3e NB |
1843 | static int ena_io_poll(struct napi_struct *napi, int budget) |
1844 | { | |
1845 | struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); | |
1846 | struct ena_ring *tx_ring, *rx_ring; | |
24dee0c7 NB |
1847 | int tx_work_done; |
1848 | int rx_work_done = 0; | |
1738cd3e NB |
1849 | int tx_budget; |
1850 | int napi_comp_call = 0; | |
1851 | int ret; | |
1852 | ||
1853 | tx_ring = ena_napi->tx_ring; | |
1854 | rx_ring = ena_napi->rx_ring; | |
1855 | ||
913b0bfd SJ |
1856 | tx_ring->first_interrupt = ena_napi->first_interrupt; |
1857 | rx_ring->first_interrupt = ena_napi->first_interrupt; | |
1858 | ||
1738cd3e NB |
1859 | tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; |
1860 | ||
3f6159db NB |
1861 | if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || |
1862 | test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { | |
1738cd3e NB |
1863 | napi_complete_done(napi, 0); |
1864 | return 0; | |
1865 | } | |
1866 | ||
1867 | tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); | |
24dee0c7 NB |
1868 | /* On netpoll the budget is zero and the handler should only clean the |
1869 | * tx completions. | |
1870 | */ | |
1871 | if (likely(budget)) | |
1872 | rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget); | |
1738cd3e | 1873 | |
b1669c9f NB |
1874 | /* If the device is about to reset or is down, avoid unmasking | |
1875 | * the interrupt and return 0 so NAPI won't reschedule | |
1876 | */ | |
1877 | if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || | |
1878 | test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { | |
1879 | napi_complete_done(napi, 0); | |
1880 | ret = 0; | |
1738cd3e | 1881 | |
b1669c9f | 1882 | } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) { |
1738cd3e | 1883 | napi_comp_call = 1; |
1738cd3e | 1884 | |
b1669c9f NB |
1885 | /* Update numa and unmask the interrupt only when scheduled | |
1886 | * from interrupt context (vs. from sk_busy_loop) | |
1738cd3e | 1887 | */ |
1e5ae350 AK |
1888 | if (napi_complete_done(napi, rx_work_done) && |
1889 | READ_ONCE(ena_napi->interrupts_masked)) { | |
1890 | smp_rmb(); /* make sure interrupts_masked is read */ | |
1891 | WRITE_ONCE(ena_napi->interrupts_masked, false); | |
282faf61 AK |
1892 | /* We apply adaptive moderation on Rx path only. |
1893 | * Tx uses static interrupt moderation. | |
1894 | */ | |
b1669c9f | 1895 | if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) |
282faf61 | 1896 | ena_adjust_adaptive_rx_intr_moderation(ena_napi); |
b1669c9f | 1897 | |
418df30f | 1898 | ena_unmask_interrupt(tx_ring, rx_ring); |
b1669c9f | 1899 | } |
1738cd3e | 1900 | |
1738cd3e NB |
1901 | ena_update_ring_numa_node(tx_ring, rx_ring); |
1902 | ||
1903 | ret = rx_work_done; | |
1904 | } else { | |
1905 | ret = budget; | |
1906 | } | |
1907 | ||
1908 | u64_stats_update_begin(&tx_ring->syncp); | |
1909 | tx_ring->tx_stats.napi_comp += napi_comp_call; | |
1910 | tx_ring->tx_stats.tx_poll++; | |
1911 | u64_stats_update_end(&tx_ring->syncp); | |
1912 | ||
1913 | return ret; | |
1914 | } | |
1915 | ||
1916 | static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data) | |
1917 | { | |
1918 | struct ena_adapter *adapter = (struct ena_adapter *)data; | |
1919 | ||
1920 | ena_com_admin_q_comp_intr_handler(adapter->ena_dev); | |
1921 | ||
1922 | /* Don't call the aenq handler before probe is done */ | |
1923 | if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) | |
1924 | ena_com_aenq_intr_handler(adapter->ena_dev, data); | |
1925 | ||
1926 | return IRQ_HANDLED; | |
1927 | } | |
1928 | ||
1929 | /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx | |
1930 | * @irq: interrupt number | |
1931 | * @data: pointer to a network interface private napi device structure | |
1932 | */ | |
1933 | static irqreturn_t ena_intr_msix_io(int irq, void *data) | |
1934 | { | |
1935 | struct ena_napi *ena_napi = data; | |
1936 | ||
913b0bfd | 1937 | ena_napi->first_interrupt = true; |
8510e1a3 | 1938 | |
1e5ae350 AK |
1939 | WRITE_ONCE(ena_napi->interrupts_masked, true); |
1940 | smp_wmb(); /* write interrupts_masked before calling napi */ | |
1941 | ||
e745dafa | 1942 | napi_schedule_irqoff(&ena_napi->napi); |
1738cd3e NB |
1943 | |
1944 | return IRQ_HANDLED; | |
1945 | } | |
1946 | ||
06443684 NB |
1947 | /* Reserve a single MSI-X vector for management (admin + aenq), | |
1948 | * plus one vector for each potential io queue. | |
1949 | * The number of potential io queues is the minimum of what the device | |
1950 | * supports and the number of vCPUs. | |
1951 | */ | |
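/* Worked illustration (editorial; assumes ENA_ADMIN_MSIX_VEC == 1, as
 * suggested by the shortfall handling below): with max_num_io_queues == 8,
 * ENA_MAX_MSIX_VEC() requests 8 + 1 = 9 vectors. If pci_alloc_irq_vectors()
 * grants only 5, one vector stays reserved for management and
 * num_io_queues is trimmed to 4.
 */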
4d192660 | 1952 | static int ena_enable_msix(struct ena_adapter *adapter) |
1738cd3e | 1953 | { |
06443684 NB |
1954 | int msix_vecs, irq_cnt; |
1955 | ||
1956 | if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { | |
1957 | netif_err(adapter, probe, adapter->netdev, | |
1958 | "Error, MSI-X is already enabled\n"); | |
1959 | return -EPERM; | |
1960 | } | |
1738cd3e NB |
1961 | |
1962 | /* Reserve the max MSI-X vectors we might need */ | |
ce1f3521 | 1963 | msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); |
1738cd3e | 1964 | netif_dbg(adapter, probe, adapter->netdev, |
bf2746e8 | 1965 | "Trying to enable MSI-X, vectors %d\n", msix_vecs); |
1738cd3e | 1966 | |
06443684 NB |
1967 | irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, |
1968 | msix_vecs, PCI_IRQ_MSIX); | |
1969 | ||
1970 | if (irq_cnt < 0) { | |
1738cd3e | 1971 | netif_err(adapter, probe, adapter->netdev, |
06443684 | 1972 | "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); |
1738cd3e NB |
1973 | return -ENOSPC; |
1974 | } | |
1975 | ||
06443684 NB |
1976 | if (irq_cnt != msix_vecs) { |
1977 | netif_notice(adapter, probe, adapter->netdev, | |
bf2746e8 | 1978 | "Enable only %d MSI-X (out of %d), reduce the number of queues\n", |
06443684 | 1979 | irq_cnt, msix_vecs); |
faa615f9 | 1980 | adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; |
1738cd3e NB |
1981 | } |
1982 | ||
06443684 NB |
1983 | if (ena_init_rx_cpu_rmap(adapter)) |
1984 | netif_warn(adapter, probe, adapter->netdev, | |
1985 | "Failed to map IRQs to CPUs\n"); | |
1986 | ||
1987 | adapter->msix_vecs = irq_cnt; | |
1988 | set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); | |
1738cd3e NB |
1989 | |
1990 | return 0; | |
1991 | } | |
1992 | ||
1993 | static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) | |
1994 | { | |
1995 | u32 cpu; | |
1996 | ||
1997 | snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, | |
1998 | ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", | |
1999 | pci_name(adapter->pdev)); | |
2000 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = | |
2001 | ena_intr_msix_mgmnt; | |
2002 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; | |
2003 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = | |
da6f4cf5 | 2004 | pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); |
1738cd3e NB |
2005 | cpu = cpumask_first(cpu_online_mask); |
2006 | adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; | |
2007 | cpumask_set_cpu(cpu, | |
2008 | &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); | |
2009 | } | |
2010 | ||
2011 | static void ena_setup_io_intr(struct ena_adapter *adapter) | |
2012 | { | |
2013 | struct net_device *netdev; | |
2014 | int irq_idx, i, cpu; | |
548c4940 | 2015 | int io_queue_count; |
1738cd3e NB |
2016 | |
2017 | netdev = adapter->netdev; | |
548c4940 | 2018 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e | 2019 | |
548c4940 | 2020 | for (i = 0; i < io_queue_count; i++) { |
1738cd3e NB |
2021 | irq_idx = ENA_IO_IRQ_IDX(i); |
2022 | cpu = i % num_online_cpus(); | |
2023 | ||
2024 | snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, | |
2025 | "%s-Tx-Rx-%d", netdev->name, i); | |
2026 | adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; | |
2027 | adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; | |
2028 | adapter->irq_tbl[irq_idx].vector = | |
da6f4cf5 | 2029 | pci_irq_vector(adapter->pdev, irq_idx); |
1738cd3e NB |
2030 | adapter->irq_tbl[irq_idx].cpu = cpu; |
2031 | ||
2032 | cpumask_set_cpu(cpu, | |
2033 | &adapter->irq_tbl[irq_idx].affinity_hint_mask); | |
2034 | } | |
2035 | } | |
2036 | ||
2037 | static int ena_request_mgmnt_irq(struct ena_adapter *adapter) | |
2038 | { | |
2039 | unsigned long flags = 0; | |
2040 | struct ena_irq *irq; | |
2041 | int rc; | |
2042 | ||
2043 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | |
2044 | rc = request_irq(irq->vector, irq->handler, flags, irq->name, | |
2045 | irq->data); | |
2046 | if (rc) { | |
2047 | netif_err(adapter, probe, adapter->netdev, | |
bf2746e8 | 2048 | "Failed to request admin irq\n"); |
1738cd3e NB |
2049 | return rc; |
2050 | } | |
2051 | ||
2052 | netif_dbg(adapter, probe, adapter->netdev, | |
bf2746e8 | 2053 | "Set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n", | |
1738cd3e NB |
2054 | irq->affinity_hint_mask.bits[0], irq->vector); |
2055 | ||
2056 | irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); | |
2057 | ||
2058 | return rc; | |
2059 | } | |
2060 | ||
2061 | static int ena_request_io_irq(struct ena_adapter *adapter) | |
2062 | { | |
e02ae6ed | 2063 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2064 | unsigned long flags = 0; |
2065 | struct ena_irq *irq; | |
2066 | int rc = 0, i, k; | |
2067 | ||
06443684 NB |
2068 | if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { |
2069 | netif_err(adapter, ifup, adapter->netdev, | |
2070 | "Failed to request I/O IRQ: MSI-X is not enabled\n"); | |
2071 | return -EINVAL; | |
2072 | } | |
2073 | ||
e02ae6ed | 2074 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1738cd3e NB |
2075 | irq = &adapter->irq_tbl[i]; |
2076 | rc = request_irq(irq->vector, irq->handler, flags, irq->name, | |
2077 | irq->data); | |
2078 | if (rc) { | |
2079 | netif_err(adapter, ifup, adapter->netdev, | |
2080 | "Failed to request I/O IRQ. index %d rc %d\n", | |
2081 | i, rc); | |
2082 | goto err; | |
2083 | } | |
2084 | ||
2085 | netif_dbg(adapter, ifup, adapter->netdev, | |
bf2746e8 | 2086 | "Set affinity hint of irq index %d to 0x%lx (irq vector: %d)\n", | |
1738cd3e NB |
2087 | i, irq->affinity_hint_mask.bits[0], irq->vector); |
2088 | ||
2089 | irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); | |
2090 | } | |
2091 | ||
2092 | return rc; | |
2093 | ||
2094 | err: | |
2095 | for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) { | |
2096 | irq = &adapter->irq_tbl[k]; | |
2097 | free_irq(irq->vector, irq->data); | |
2098 | } | |
2099 | ||
2100 | return rc; | |
2101 | } | |
2102 | ||
2103 | static void ena_free_mgmnt_irq(struct ena_adapter *adapter) | |
2104 | { | |
2105 | struct ena_irq *irq; | |
2106 | ||
2107 | irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; | |
2108 | synchronize_irq(irq->vector); | |
2109 | irq_set_affinity_hint(irq->vector, NULL); | |
2110 | free_irq(irq->vector, irq->data); | |
2111 | } | |
2112 | ||
2113 | static void ena_free_io_irq(struct ena_adapter *adapter) | |
2114 | { | |
e02ae6ed | 2115 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2116 | struct ena_irq *irq; |
2117 | int i; | |
2118 | ||
2119 | #ifdef CONFIG_RFS_ACCEL | |
2120 | if (adapter->msix_vecs >= 1) { | |
2121 | free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); | |
2122 | adapter->netdev->rx_cpu_rmap = NULL; | |
2123 | } | |
2124 | #endif /* CONFIG_RFS_ACCEL */ | |
2125 | ||
e02ae6ed | 2126 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { |
1738cd3e NB |
2127 | irq = &adapter->irq_tbl[i]; |
2128 | irq_set_affinity_hint(irq->vector, NULL); | |
2129 | free_irq(irq->vector, irq->data); | |
2130 | } | |
2131 | } | |
2132 | ||
06443684 NB |
2133 | static void ena_disable_msix(struct ena_adapter *adapter) |
2134 | { | |
2135 | if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) | |
2136 | pci_free_irq_vectors(adapter->pdev); | |
2137 | } | |
2138 | ||
1738cd3e NB |
2139 | static void ena_disable_io_intr_sync(struct ena_adapter *adapter) |
2140 | { | |
e02ae6ed | 2141 | u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2142 | int i; |
2143 | ||
2144 | if (!netif_running(adapter->netdev)) | |
2145 | return; | |
2146 | ||
e02ae6ed | 2147 | for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) |
1738cd3e NB |
2148 | synchronize_irq(adapter->irq_tbl[i].vector); |
2149 | } | |
2150 | ||
548c4940 SJ |
2151 | static void ena_del_napi_in_range(struct ena_adapter *adapter, |
2152 | int first_index, | |
2153 | int count) | |
1738cd3e NB |
2154 | { |
2155 | int i; | |
2156 | ||
548c4940 | 2157 | for (i = first_index; i < first_index + count; i++) { |
8b147f6f SA |
2158 | netif_napi_del(&adapter->ena_napi[i].napi); |
2159 | ||
2160 | WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) && | |
2161 | adapter->ena_napi[i].xdp_ring); | |
548c4940 | 2162 | } |
1738cd3e NB |
2163 | } |
2164 | ||
548c4940 SJ |
2165 | static void ena_init_napi_in_range(struct ena_adapter *adapter, |
2166 | int first_index, int count) | |
1738cd3e | 2167 | { |
1738cd3e NB |
2168 | int i; |
2169 | ||
548c4940 | 2170 | for (i = first_index; i < first_index + count; i++) { |
d89d8d4d | 2171 | struct ena_napi *napi = &adapter->ena_napi[i]; |
1738cd3e NB |
2172 | |
2173 | netif_napi_add(adapter->netdev, | |
d89d8d4d | 2174 | &napi->napi, |
548c4940 | 2175 | ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll, |
1738cd3e | 2176 | ENA_NAPI_BUDGET); |
548c4940 SJ |
2177 | |
2178 | if (!ENA_IS_XDP_INDEX(adapter, i)) { | |
2179 | napi->rx_ring = &adapter->rx_ring[i]; | |
2180 | napi->tx_ring = &adapter->tx_ring[i]; | |
2181 | } else { | |
2182 | napi->xdp_ring = &adapter->tx_ring[i]; | |
2183 | } | |
1738cd3e NB |
2184 | napi->qid = i; |
2185 | } | |
2186 | } | |
2187 | ||
548c4940 SJ |
2188 | static void ena_napi_disable_in_range(struct ena_adapter *adapter, |
2189 | int first_index, | |
2190 | int count) | |
1738cd3e NB |
2191 | { |
2192 | int i; | |
2193 | ||
548c4940 | 2194 | for (i = first_index; i < first_index + count; i++) |
1738cd3e NB |
2195 | napi_disable(&adapter->ena_napi[i].napi); |
2196 | } | |
2197 | ||
548c4940 SJ |
2198 | static void ena_napi_enable_in_range(struct ena_adapter *adapter, |
2199 | int first_index, | |
2200 | int count) | |
1738cd3e NB |
2201 | { |
2202 | int i; | |
2203 | ||
548c4940 | 2204 | for (i = first_index; i < first_index + count; i++) |
1738cd3e NB |
2205 | napi_enable(&adapter->ena_napi[i].napi); |
2206 | } | |
2207 | ||
1738cd3e NB |
2208 | /* Configure the Rx forwarding */ |
2209 | static int ena_rss_configure(struct ena_adapter *adapter) | |
2210 | { | |
2211 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2212 | int rc; | |
2213 | ||
2214 | /* In case the RSS table wasn't initialized by probe */ | |
2215 | if (!ena_dev->rss.tbl_log_size) { | |
2216 | rc = ena_rss_init_default(adapter); | |
d1497638 | 2217 | if (rc && (rc != -EOPNOTSUPP)) { |
1738cd3e | 2218 | netif_err(adapter, ifup, adapter->netdev, |
46143e58 | 2219 | "Failed to init RSS rc: %d\n", rc); |
1738cd3e NB |
2220 | return rc; |
2221 | } | |
2222 | } | |
2223 | ||
2224 | /* Set indirect table */ | |
2225 | rc = ena_com_indirect_table_set(ena_dev); | |
d1497638 | 2226 | if (unlikely(rc && rc != -EOPNOTSUPP)) |
1738cd3e NB |
2227 | return rc; |
2228 | ||
2229 | /* Configure hash function (if supported) */ | |
2230 | rc = ena_com_set_hash_function(ena_dev); | |
d1497638 | 2231 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1738cd3e NB |
2232 | return rc; |
2233 | ||
2234 | /* Configure hash inputs (if supported) */ | |
2235 | rc = ena_com_set_hash_ctrl(ena_dev); | |
d1497638 | 2236 | if (unlikely(rc && (rc != -EOPNOTSUPP))) |
1738cd3e NB |
2237 | return rc; |
2238 | ||
2239 | return 0; | |
2240 | } | |
2241 | ||
2242 | static int ena_up_complete(struct ena_adapter *adapter) | |
2243 | { | |
7853b49c | 2244 | int rc; |
1738cd3e NB |
2245 | |
2246 | rc = ena_rss_configure(adapter); | |
2247 | if (rc) | |
2248 | return rc; | |
2249 | ||
1738cd3e NB |
2250 | ena_change_mtu(adapter->netdev, adapter->netdev->mtu); |
2251 | ||
2252 | ena_refill_all_rx_bufs(adapter); | |
2253 | ||
2254 | /* enable transmits */ | |
2255 | netif_tx_start_all_queues(adapter->netdev); | |
2256 | ||
548c4940 SJ |
2257 | ena_napi_enable_in_range(adapter, |
2258 | 0, | |
2259 | adapter->xdp_num_queues + adapter->num_io_queues); | |
1738cd3e | 2260 | |
1738cd3e NB |
2261 | return 0; |
2262 | } | |
2263 | ||
2264 | static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) | |
2265 | { | |
38005ca8 | 2266 | struct ena_com_create_io_ctx ctx; |
1738cd3e NB |
2267 | struct ena_com_dev *ena_dev; |
2268 | struct ena_ring *tx_ring; | |
2269 | u32 msix_vector; | |
2270 | u16 ena_qid; | |
2271 | int rc; | |
2272 | ||
2273 | ena_dev = adapter->ena_dev; | |
2274 | ||
2275 | tx_ring = &adapter->tx_ring[qid]; | |
2276 | msix_vector = ENA_IO_IRQ_IDX(qid); | |
2277 | ena_qid = ENA_IO_TXQ_IDX(qid); | |
2278 | ||
38005ca8 AK |
2279 | memset(&ctx, 0x0, sizeof(ctx)); |
2280 | ||
1738cd3e NB |
2281 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; |
2282 | ctx.qid = ena_qid; | |
2283 | ctx.mem_queue_type = ena_dev->tx_mem_queue_type; | |
2284 | ctx.msix_vector = msix_vector; | |
13ca32a6 | 2285 | ctx.queue_size = tx_ring->ring_size; |
1738cd3e NB |
2286 | ctx.numa_node = cpu_to_node(tx_ring->cpu); |
2287 | ||
2288 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
2289 | if (rc) { | |
2290 | netif_err(adapter, ifup, adapter->netdev, | |
2291 | "Failed to create I/O TX queue num %d rc: %d\n", | |
46143e58 | 2292 | qid, rc); |
1738cd3e NB |
2293 | return rc; |
2294 | } | |
2295 | ||
2296 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
2297 | &tx_ring->ena_com_io_sq, | |
2298 | &tx_ring->ena_com_io_cq); | |
2299 | if (rc) { | |
2300 | netif_err(adapter, ifup, adapter->netdev, | |
2301 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", | |
2302 | qid, rc); | |
2303 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
2d2c600a | 2304 | return rc; |
1738cd3e NB |
2305 | } |
2306 | ||
2307 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); | |
2308 | return rc; | |
2309 | } | |
2310 | ||
548c4940 SJ |
2311 | static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, |
2312 | int first_index, int count) | |
1738cd3e NB |
2313 | { |
2314 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2315 | int rc, i; | |
2316 | ||
548c4940 | 2317 | for (i = first_index; i < first_index + count; i++) { |
1738cd3e NB |
2318 | rc = ena_create_io_tx_queue(adapter, i); |
2319 | if (rc) | |
2320 | goto create_err; | |
2321 | } | |
2322 | ||
2323 | return 0; | |
2324 | ||
2325 | create_err: | |
548c4940 | 2326 | while (i-- > first_index) |
1738cd3e NB |
2327 | ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); |
2328 | ||
2329 | return rc; | |
2330 | } | |
2331 | ||
2332 | static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) | |
2333 | { | |
2334 | struct ena_com_dev *ena_dev; | |
38005ca8 | 2335 | struct ena_com_create_io_ctx ctx; |
1738cd3e NB |
2336 | struct ena_ring *rx_ring; |
2337 | u32 msix_vector; | |
2338 | u16 ena_qid; | |
2339 | int rc; | |
2340 | ||
2341 | ena_dev = adapter->ena_dev; | |
2342 | ||
2343 | rx_ring = &adapter->rx_ring[qid]; | |
2344 | msix_vector = ENA_IO_IRQ_IDX(qid); | |
2345 | ena_qid = ENA_IO_RXQ_IDX(qid); | |
2346 | ||
38005ca8 AK |
2347 | memset(&ctx, 0x0, sizeof(ctx)); |
2348 | ||
1738cd3e NB |
2349 | ctx.qid = ena_qid; |
2350 | ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; | |
2351 | ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
2352 | ctx.msix_vector = msix_vector; | |
13ca32a6 | 2353 | ctx.queue_size = rx_ring->ring_size; |
1738cd3e NB |
2354 | ctx.numa_node = cpu_to_node(rx_ring->cpu); |
2355 | ||
2356 | rc = ena_com_create_io_queue(ena_dev, &ctx); | |
2357 | if (rc) { | |
2358 | netif_err(adapter, ifup, adapter->netdev, | |
2359 | "Failed to create I/O RX queue num %d rc: %d\n", | |
2360 | qid, rc); | |
2361 | return rc; | |
2362 | } | |
2363 | ||
2364 | rc = ena_com_get_io_handlers(ena_dev, ena_qid, | |
2365 | &rx_ring->ena_com_io_sq, | |
2366 | &rx_ring->ena_com_io_cq); | |
2367 | if (rc) { | |
2368 | netif_err(adapter, ifup, adapter->netdev, | |
2369 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", | |
2370 | qid, rc); | |
838c93dc | 2371 | goto err; |
1738cd3e NB |
2372 | } |
2373 | ||
2374 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); | |
2375 | ||
838c93dc SJ |
2376 | return rc; |
2377 | err: | |
2378 | ena_com_destroy_io_queue(ena_dev, ena_qid); | |
1738cd3e NB |
2379 | return rc; |
2380 | } | |
2381 | ||
2382 | static int ena_create_all_io_rx_queues(struct ena_adapter *adapter) | |
2383 | { | |
2384 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
2385 | int rc, i; | |
2386 | ||
faa615f9 | 2387 | for (i = 0; i < adapter->num_io_queues; i++) { |
1738cd3e NB |
2388 | rc = ena_create_io_rx_queue(adapter, i); |
2389 | if (rc) | |
2390 | goto create_err; | |
282faf61 | 2391 | INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); |
1738cd3e NB |
2392 | } |
2393 | ||
2394 | return 0; | |
2395 | ||
2396 | create_err: | |
282faf61 AK |
2397 | while (i--) { |
2398 | cancel_work_sync(&adapter->ena_napi[i].dim.work); | |
1738cd3e | 2399 | ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); |
282faf61 | 2400 | } |
1738cd3e NB |
2401 | |
2402 | return rc; | |
2403 | } | |
2404 | ||
13ca32a6 | 2405 | static void set_io_rings_size(struct ena_adapter *adapter, |
548c4940 SJ |
2406 | int new_tx_size, |
2407 | int new_rx_size) | |
13ca32a6 SJ |
2408 | { |
2409 | int i; | |
2410 | ||
faa615f9 | 2411 | for (i = 0; i < adapter->num_io_queues; i++) { |
13ca32a6 SJ |
2412 | adapter->tx_ring[i].ring_size = new_tx_size; |
2413 | adapter->rx_ring[i].ring_size = new_rx_size; | |
2414 | } | |
2415 | } | |
2416 | ||
2417 | /* This function allows queue allocation to back off when the system is | |
2418 | * low on memory. If there is not enough memory to allocate io queues | |
2419 | * the driver will try to allocate smaller queues. | |
2420 | * | |
2421 | * The backoff algorithm is as follows: | |
2422 | * 1. Try to allocate TX and RX; if successful, | |
2423 | * 1.1. return success | |
2424 | * | |
2425 | * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same). | |
2426 | * | |
2427 | * 3. If TX or RX is smaller than 256 | |
2428 | * 3.1. return failure. | |
2429 | * 4. else | |
2430 | * 4.1. go back to 1. | |
2431 | */ | |
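/* Worked example (editorial, illustrative sizes only): requested
 * TX=1024/RX=1024 retries as 512/512 and then 256/256; requested
 * TX=1024/RX=256 retries as 512/256 and then 256/256. A further failure
 * would need 128-entry rings, below ENA_MIN_RING_SIZE, so the function
 * gives up and returns the last error.
 */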
2432 | static int create_queues_with_size_backoff(struct ena_adapter *adapter) | |
2433 | { | |
2434 | int rc, cur_rx_ring_size, cur_tx_ring_size; | |
2435 | int new_rx_ring_size, new_tx_ring_size; | |
2436 | ||
2437 | /* Current queue sizes might be set smaller than the requested | |
2438 | * ones due to past queue allocation failures. | |
2439 | */ | |
2440 | set_io_rings_size(adapter, adapter->requested_tx_ring_size, | |
46143e58 | 2441 | adapter->requested_rx_ring_size); |
13ca32a6 SJ |
2442 | |
2443 | while (1) { | |
548c4940 SJ |
2444 | if (ena_xdp_present(adapter)) { |
2445 | rc = ena_setup_and_create_all_xdp_queues(adapter); | |
2446 | ||
2447 | if (rc) | |
2448 | goto err_setup_tx; | |
2449 | } | |
2450 | rc = ena_setup_tx_resources_in_range(adapter, | |
2451 | 0, | |
2452 | adapter->num_io_queues); | |
13ca32a6 SJ |
2453 | if (rc) |
2454 | goto err_setup_tx; | |
2455 | ||
548c4940 SJ |
2456 | rc = ena_create_io_tx_queues_in_range(adapter, |
2457 | 0, | |
2458 | adapter->num_io_queues); | |
13ca32a6 SJ |
2459 | if (rc) |
2460 | goto err_create_tx_queues; | |
2461 | ||
2462 | rc = ena_setup_all_rx_resources(adapter); | |
2463 | if (rc) | |
2464 | goto err_setup_rx; | |
2465 | ||
2466 | rc = ena_create_all_io_rx_queues(adapter); | |
2467 | if (rc) | |
2468 | goto err_create_rx_queues; | |
2469 | ||
2470 | return 0; | |
2471 | ||
2472 | err_create_rx_queues: | |
2473 | ena_free_all_io_rx_resources(adapter); | |
2474 | err_setup_rx: | |
2475 | ena_destroy_all_tx_queues(adapter); | |
2476 | err_create_tx_queues: | |
2477 | ena_free_all_io_tx_resources(adapter); | |
2478 | err_setup_tx: | |
2479 | if (rc != -ENOMEM) { | |
2480 | netif_err(adapter, ifup, adapter->netdev, | |
2481 | "Queue creation failed with error code %d\n", | |
46143e58 | 2482 | rc); |
13ca32a6 SJ |
2483 | return rc; |
2484 | } | |
2485 | ||
2486 | cur_tx_ring_size = adapter->tx_ring[0].ring_size; | |
2487 | cur_rx_ring_size = adapter->rx_ring[0].ring_size; | |
2488 | ||
2489 | netif_err(adapter, ifup, adapter->netdev, | |
2490 | "Not enough memory to create queues with sizes TX=%d, RX=%d\n", | |
2491 | cur_tx_ring_size, cur_rx_ring_size); | |
2492 | ||
2493 | new_tx_ring_size = cur_tx_ring_size; | |
2494 | new_rx_ring_size = cur_rx_ring_size; | |
2495 | ||
2496 | /* Decrease the size of the larger queue, or | |
2497 | * decrease both if they are the same size. | |
2498 | */ | |
2499 | if (cur_rx_ring_size <= cur_tx_ring_size) | |
2500 | new_tx_ring_size = cur_tx_ring_size / 2; | |
2501 | if (cur_rx_ring_size >= cur_tx_ring_size) | |
2502 | new_rx_ring_size = cur_rx_ring_size / 2; | |
2503 | ||
3e5bfb18 | 2504 | if (new_tx_ring_size < ENA_MIN_RING_SIZE || |
46143e58 | 2505 | new_rx_ring_size < ENA_MIN_RING_SIZE) { |
13ca32a6 SJ |
2506 | netif_err(adapter, ifup, adapter->netdev, |
2507 | "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", | |
2508 | ENA_MIN_RING_SIZE); | |
2509 | return rc; | |
2510 | } | |
2511 | ||
2512 | netif_err(adapter, ifup, adapter->netdev, | |
2513 | "Retrying queue creation with sizes TX=%d, RX=%d\n", | |
2514 | new_tx_ring_size, | |
2515 | new_rx_ring_size); | |
2516 | ||
2517 | set_io_rings_size(adapter, new_tx_ring_size, | |
2518 | new_rx_ring_size); | |
2519 | } | |
2520 | } | |
2521 | ||
1738cd3e NB |
2522 | static int ena_up(struct ena_adapter *adapter) |
2523 | { | |
548c4940 | 2524 | int io_queue_count, rc, i; |
1738cd3e | 2525 | |
f0525298 | 2526 | netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); |
1738cd3e | 2527 | |
548c4940 | 2528 | io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
1738cd3e NB |
2529 | ena_setup_io_intr(adapter); |
2530 | ||
78a55d05 AK |
2531 | /* napi poll functions should be initialized before running |
2532 | * request_irq(), to handle a rare condition where a pending | |
2533 | * interrupt causes the ISR to fire immediately while the poll | |
2534 | * function isn't set yet, leading to a NULL dereference | |
2535 | */ | |
548c4940 | 2536 | ena_init_napi_in_range(adapter, 0, io_queue_count); |
78a55d05 | 2537 | |
1738cd3e NB |
2538 | rc = ena_request_io_irq(adapter); |
2539 | if (rc) | |
2540 | goto err_req_irq; | |
2541 | ||
13ca32a6 | 2542 | rc = create_queues_with_size_backoff(adapter); |
1738cd3e | 2543 | if (rc) |
13ca32a6 | 2544 | goto err_create_queues_with_backoff; |
1738cd3e NB |
2545 | |
2546 | rc = ena_up_complete(adapter); | |
2547 | if (rc) | |
2548 | goto err_up; | |
2549 | ||
2550 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | |
2551 | netif_carrier_on(adapter->netdev); | |
2552 | ||
2553 | u64_stats_update_begin(&adapter->syncp); | |
2554 | adapter->dev_stats.interface_up++; | |
2555 | u64_stats_update_end(&adapter->syncp); | |
2556 | ||
2557 | set_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2558 | ||
7853b49c | 2559 | /* Enable completion queues interrupt */ |
faa615f9 | 2560 | for (i = 0; i < adapter->num_io_queues; i++) |
7853b49c NB |
2561 | ena_unmask_interrupt(&adapter->tx_ring[i], |
2562 | &adapter->rx_ring[i]); | |
2563 | ||
2564 | /* schedule napi in case we had pending packets | |
2565 | * from the last time we disabled napi | |
2566 | */ | |
548c4940 | 2567 | for (i = 0; i < io_queue_count; i++) |
7853b49c NB |
2568 | napi_schedule(&adapter->ena_napi[i].napi); |
2569 | ||
1738cd3e NB |
2570 | return rc; |
2571 | ||
2572 | err_up: | |
1738cd3e | 2573 | ena_destroy_all_tx_queues(adapter); |
1738cd3e | 2574 | ena_free_all_io_tx_resources(adapter); |
13ca32a6 SJ |
2575 | ena_destroy_all_rx_queues(adapter); |
2576 | ena_free_all_io_rx_resources(adapter); | |
2577 | err_create_queues_with_backoff: | |
1738cd3e NB |
2578 | ena_free_io_irq(adapter); |
2579 | err_req_irq: | |
548c4940 | 2580 | ena_del_napi_in_range(adapter, 0, io_queue_count); |
1738cd3e NB |
2581 | |
2582 | return rc; | |
2583 | } | |
2584 | ||
2585 | static void ena_down(struct ena_adapter *adapter) | |
2586 | { | |
548c4940 SJ |
2587 | int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; |
2588 | ||
1738cd3e NB |
2589 | netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); |
2590 | ||
2591 | clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2592 | ||
2593 | u64_stats_update_begin(&adapter->syncp); | |
2594 | adapter->dev_stats.interface_down++; | |
2595 | u64_stats_update_end(&adapter->syncp); | |
2596 | ||
1738cd3e NB |
2597 | netif_carrier_off(adapter->netdev); |
2598 | netif_tx_disable(adapter->netdev); | |
2599 | ||
3f6159db | 2600 | /* After this point the napi handler won't enable the tx queue */ |
548c4940 | 2601 | ena_napi_disable_in_range(adapter, 0, io_queue_count); |
3f6159db | 2602 | |
1738cd3e | 2603 | /* After destroying the queues there won't be any new interrupts */ | |
3f6159db NB |
2604 | |
2605 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { | |
2606 | int rc; | |
2607 | ||
e2eed0e3 | 2608 | rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
3f6159db | 2609 | if (rc) |
f0525298 SA |
2610 | netif_err(adapter, ifdown, adapter->netdev, |
2611 | "Device reset failed\n"); | |
58a54b9c AK |
2612 | /* stop submitting admin commands on a device that was reset */ |
2613 | ena_com_set_admin_running_state(adapter->ena_dev, false); | |
3f6159db NB |
2614 | } |
2615 | ||
1738cd3e NB |
2616 | ena_destroy_all_io_queues(adapter); |
2617 | ||
2618 | ena_disable_io_intr_sync(adapter); | |
2619 | ena_free_io_irq(adapter); | |
548c4940 | 2620 | ena_del_napi_in_range(adapter, 0, io_queue_count); |
1738cd3e NB |
2621 | |
2622 | ena_free_all_tx_bufs(adapter); | |
2623 | ena_free_all_rx_bufs(adapter); | |
2624 | ena_free_all_io_tx_resources(adapter); | |
2625 | ena_free_all_io_rx_resources(adapter); | |
2626 | } | |
2627 | ||
2628 | /* ena_open - Called when a network interface is made active | |
2629 | * @netdev: network interface device structure | |
2630 | * | |
2631 | * Returns 0 on success, negative value on failure | |
2632 | * | |
2633 | * The open entry point is called when a network interface is made | |
2634 | * active by the system (IFF_UP). At this point all resources needed | |
2635 | * for transmit and receive operations are allocated, the interrupt | |
2636 | * handler is registered with the OS, the watchdog timer is started, | |
2637 | * and the stack is notified that the interface is ready. | |
2638 | */ | |
2639 | static int ena_open(struct net_device *netdev) | |
2640 | { | |
2641 | struct ena_adapter *adapter = netdev_priv(netdev); | |
2642 | int rc; | |
2643 | ||
2644 | /* Notify the stack of the actual queue counts. */ | |
faa615f9 | 2645 | rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); |
1738cd3e NB |
2646 | if (rc) { |
2647 | netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); | |
2648 | return rc; | |
2649 | } | |
2650 | ||
faa615f9 | 2651 | rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); |
1738cd3e NB |
2652 | if (rc) { |
2653 | netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); | |
2654 | return rc; | |
2655 | } | |
2656 | ||
2657 | rc = ena_up(adapter); | |
2658 | if (rc) | |
2659 | return rc; | |
2660 | ||
2661 | return rc; | |
2662 | } | |
2663 | ||
2664 | /* ena_close - Disables a network interface | |
2665 | * @netdev: network interface device structure | |
2666 | * | |
2667 | * Returns 0, this is not allowed to fail | |
2668 | * | |
2669 | * The close entry point is called when an interface is de-activated | |
2670 | * by the OS. The hardware is still under the driver's control, but | |
2671 | * needs to be disabled. A global MAC reset is issued to stop the | |
2672 | * hardware, and all transmit and receive resources are freed. | |
2673 | */ | |
2674 | static int ena_close(struct net_device *netdev) | |
2675 | { | |
2676 | struct ena_adapter *adapter = netdev_priv(netdev); | |
2677 | ||
2678 | netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); | |
2679 | ||
58a54b9c AK |
2680 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
2681 | return 0; | |
2682 | ||
1738cd3e NB |
2683 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
2684 | ena_down(adapter); | |
2685 | ||
ee4552aa NB |
2686 | /* Check for device status and issue reset if needed */ | |
2687 | check_for_admin_com_state(adapter); | |
2688 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
2689 | netif_err(adapter, ifdown, adapter->netdev, | |
2690 | "Destroy failure, restarting device\n"); | |
2691 | ena_dump_stats_to_dmesg(adapter); | |
2692 | /* rtnl lock already obtained in dev_ioctl() layer */ | |
cfa324a5 | 2693 | ena_destroy_device(adapter, false); |
ee4552aa NB |
2694 | ena_restore_device(adapter); |
2695 | } | |
2696 | ||
1738cd3e NB |
2697 | return 0; |
2698 | } | |
2699 | ||
eece4d2a SJ |
2700 | int ena_update_queue_sizes(struct ena_adapter *adapter, |
2701 | u32 new_tx_size, | |
2702 | u32 new_rx_size) | |
2703 | { | |
2413ea97 | 2704 | bool dev_was_up; |
eece4d2a | 2705 | |
2413ea97 | 2706 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
eece4d2a SJ |
2707 | ena_close(adapter->netdev); |
2708 | adapter->requested_tx_ring_size = new_tx_size; | |
2709 | adapter->requested_rx_ring_size = new_rx_size; | |
548c4940 SJ |
2710 | ena_init_io_rings(adapter, |
2711 | 0, | |
2712 | adapter->xdp_num_queues + | |
2713 | adapter->num_io_queues); | |
2413ea97 SJ |
2714 | return dev_was_up ? ena_up(adapter) : 0; |
2715 | } | |
2716 | ||
2717 | int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) | |
2718 | { | |
2719 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
838c93dc | 2720 | int prev_channel_count; |
2413ea97 SJ |
2721 | bool dev_was_up; |
2722 | ||
2723 | dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); | |
2724 | ena_close(adapter->netdev); | |
838c93dc | 2725 | prev_channel_count = adapter->num_io_queues; |
2413ea97 | 2726 | adapter->num_io_queues = new_channel_count; |
548c4940 SJ |
2727 | if (ena_xdp_present(adapter) && |
2728 | ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) { | |
2729 | adapter->xdp_first_ring = new_channel_count; | |
2730 | adapter->xdp_num_queues = new_channel_count; | |
838c93dc SJ |
2731 | if (prev_channel_count > new_channel_count) |
2732 | ena_xdp_exchange_program_rx_in_range(adapter, | |
2733 | NULL, | |
2734 | new_channel_count, | |
2735 | prev_channel_count); | |
2736 | else | |
2737 | ena_xdp_exchange_program_rx_in_range(adapter, | |
2738 | adapter->xdp_bpf_prog, | |
2739 | prev_channel_count, | |
2740 | new_channel_count); | |
2741 | } | |
2742 | ||
2413ea97 SJ |
2743 | /* We need to destroy the rss table so that the indirection |
2744 | * table will be reinitialized by ena_up() | |
2745 | */ | |
2746 | ena_com_rss_destroy(ena_dev); | |
548c4940 SJ |
2747 | ena_init_io_rings(adapter, |
2748 | 0, | |
2749 | adapter->xdp_num_queues + | |
2750 | adapter->num_io_queues); | |
2413ea97 | 2751 | return dev_was_up ? ena_open(adapter->netdev) : 0; |
eece4d2a SJ |
2752 | } |
2753 | ||
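/* Ring layout note (editorial sketch, inferred from the indices used
 * above): regular queues occupy tx_ring[0..num_io_queues-1] and the XDP tx
 * rings, when a program is attached, are appended right after them, so
 * xdp_first_ring == num_io_queues with one XDP tx ring per channel.
 */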
0e3a3f6d AK |
2754 | static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, |
2755 | struct sk_buff *skb, | |
2756 | bool disable_meta_caching) | |
1738cd3e NB |
2757 | { |
2758 | u32 mss = skb_shinfo(skb)->gso_size; | |
2759 | struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; | |
2760 | u8 l4_protocol = 0; | |
2761 | ||
2762 | if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { | |
2763 | ena_tx_ctx->l4_csum_enable = 1; | |
2764 | if (mss) { | |
2765 | ena_tx_ctx->tso_enable = 1; | |
2766 | ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; | |
2767 | ena_tx_ctx->l4_csum_partial = 0; | |
2768 | } else { | |
2769 | ena_tx_ctx->tso_enable = 0; | |
2770 | ena_meta->l4_hdr_len = 0; | |
2771 | ena_tx_ctx->l4_csum_partial = 1; | |
2772 | } | |
2773 | ||
2774 | switch (ip_hdr(skb)->version) { | |
2775 | case IPVERSION: | |
2776 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; | |
2777 | if (ip_hdr(skb)->frag_off & htons(IP_DF)) | |
2778 | ena_tx_ctx->df = 1; | |
2779 | if (mss) | |
2780 | ena_tx_ctx->l3_csum_enable = 1; | |
2781 | l4_protocol = ip_hdr(skb)->protocol; | |
2782 | break; | |
2783 | case 6: | |
2784 | ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; | |
2785 | l4_protocol = ipv6_hdr(skb)->nexthdr; | |
2786 | break; | |
2787 | default: | |
2788 | break; | |
2789 | } | |
2790 | ||
2791 | if (l4_protocol == IPPROTO_TCP) | |
2792 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; | |
2793 | else | |
2794 | ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; | |
2795 | ||
2796 | ena_meta->mss = mss; | |
2797 | ena_meta->l3_hdr_len = skb_network_header_len(skb); | |
2798 | ena_meta->l3_hdr_offset = skb_network_offset(skb); | |
2799 | ena_tx_ctx->meta_valid = 1; | |
0e3a3f6d AK |
2800 | } else if (disable_meta_caching) { |
2801 | memset(ena_meta, 0, sizeof(*ena_meta)); | |
2802 | ena_tx_ctx->meta_valid = 1; | |
1738cd3e NB |
2803 | } else { |
2804 | ena_tx_ctx->meta_valid = 0; | |
2805 | } | |
2806 | } | |
2807 | ||
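/* Illustrative cases for ena_tx_csum() above (editorial, not from the
 * sources): a TSO IPv4/TCP skb with gso_size = 1448 takes the first branch
 * with tso_enable = 1, l3_csum_enable = 1 and mss = 1448, while a plain
 * CHECKSUM_PARTIAL UDP packet gets l4_csum_enable = 1, l4_csum_partial = 1
 * and tso_enable = 0.
 */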
2808 | static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, | |
2809 | struct sk_buff *skb) | |
2810 | { | |
2811 | int num_frags, header_len, rc; | |
2812 | ||
2813 | num_frags = skb_shinfo(skb)->nr_frags; | |
2814 | header_len = skb_headlen(skb); | |
2815 | ||
2816 | if (num_frags < tx_ring->sgl_size) | |
2817 | return 0; | |
2818 | ||
2819 | if ((num_frags == tx_ring->sgl_size) && | |
2820 | (header_len < tx_ring->tx_max_header_size)) | |
2821 | return 0; | |
2822 | ||
2823 | u64_stats_update_begin(&tx_ring->syncp); | |
2824 | tx_ring->tx_stats.linearize++; | |
2825 | u64_stats_update_end(&tx_ring->syncp); | |
2826 | ||
2827 | rc = skb_linearize(skb); | |
2828 | if (unlikely(rc)) { | |
2829 | u64_stats_update_begin(&tx_ring->syncp); | |
2830 | tx_ring->tx_stats.linearize_failed++; | |
2831 | u64_stats_update_end(&tx_ring->syncp); | |
2832 | } | |
2833 | ||
2834 | return rc; | |
2835 | } | |
2836 | ||
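/* Editorial note: linearization is only attempted when the skb cannot be
 * described by the ring's scatter-gather list, i.e. nr_frags exceeds
 * sgl_size, or equals it while the linear head is too large to be sent as
 * a pushed header; skb_linearize() then merges the fragments into the
 * linear data area.
 */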
38005ca8 AK |
2837 | static int ena_tx_map_skb(struct ena_ring *tx_ring, |
2838 | struct ena_tx_buffer *tx_info, | |
2839 | struct sk_buff *skb, | |
2840 | void **push_hdr, | |
2841 | u16 *header_len) | |
1738cd3e | 2842 | { |
38005ca8 | 2843 | struct ena_adapter *adapter = tx_ring->adapter; |
1738cd3e | 2844 | struct ena_com_buf *ena_buf; |
1738cd3e | 2845 | dma_addr_t dma; |
38005ca8 AK |
2846 | u32 skb_head_len, frag_len, last_frag; |
2847 | u16 push_len = 0; | |
2848 | u16 delta = 0; | |
2849 | int i = 0; | |
1738cd3e | 2850 | |
38005ca8 | 2851 | skb_head_len = skb_headlen(skb); |
1738cd3e | 2852 | tx_info->skb = skb; |
38005ca8 | 2853 | ena_buf = tx_info->bufs; |
1738cd3e NB |
2854 | |
2855 | if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
38005ca8 AK |
2856 | /* When the device is in LLQ mode, the driver will copy | |
2857 | * the header into the device memory space. | |
2858 | * The ena_com layer assumes the header is in a linear | |
2859 | * memory space. | |
2860 | * This assumption might be wrong since part of the header | |
2861 | * can be in the fragmented buffers. | |
2862 | * Use skb_header_pointer to make sure the header is in a | |
2863 | * linear memory space. | |
2864 | */ | |
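/* Illustrative LLQ case (editorial, hypothetical sizes): with
 * tx_max_header_size = 96, a 64-byte linear head and the rest in frags,
 * push_len = 96 and delta = 96 - 64 = 32; the first 32 bytes of frag 0
 * were already pushed as the header, so the frag-mapping loop below skips
 * them through the delta offset.
 */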
2865 | ||
2866 | push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); | |
2867 | *push_hdr = skb_header_pointer(skb, 0, push_len, | |
2868 | tx_ring->push_buf_intermediate_buf); | |
2869 | *header_len = push_len; | |
2870 | if (unlikely(skb->data != *push_hdr)) { | |
2871 | u64_stats_update_begin(&tx_ring->syncp); | |
2872 | tx_ring->tx_stats.llq_buffer_copy++; | |
2873 | u64_stats_update_end(&tx_ring->syncp); | |
2874 | ||
2875 | delta = push_len - skb_head_len; | |
2876 | } | |
1738cd3e | 2877 | } else { |
38005ca8 AK |
2878 | *push_hdr = NULL; |
2879 | *header_len = min_t(u32, skb_head_len, | |
2880 | tx_ring->tx_max_header_size); | |
1738cd3e NB |
2881 | } |
2882 | ||
38005ca8 | 2883 | netif_dbg(adapter, tx_queued, adapter->netdev, |
1738cd3e | 2884 | "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, |
38005ca8 | 2885 | *push_hdr, push_len); |
1738cd3e | 2886 | |
38005ca8 | 2887 | if (skb_head_len > push_len) { |
1738cd3e | 2888 | dma = dma_map_single(tx_ring->dev, skb->data + push_len, |
38005ca8 AK |
2889 | skb_head_len - push_len, DMA_TO_DEVICE); |
2890 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) | |
1738cd3e NB |
2891 | goto error_report_dma_error; |
2892 | ||
2893 | ena_buf->paddr = dma; | |
38005ca8 | 2894 | ena_buf->len = skb_head_len - push_len; |
1738cd3e NB |
2895 | |
2896 | ena_buf++; | |
2897 | tx_info->num_of_bufs++; | |
38005ca8 AK |
2898 | tx_info->map_linear_data = 1; |
2899 | } else { | |
2900 | tx_info->map_linear_data = 0; | |
1738cd3e NB |
2901 | } |
2902 | ||
2903 | last_frag = skb_shinfo(skb)->nr_frags; | |
2904 | ||
2905 | for (i = 0; i < last_frag; i++) { | |
2906 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
2907 | ||
38005ca8 AK |
2908 | frag_len = skb_frag_size(frag); |
2909 | ||
2910 | if (unlikely(delta >= frag_len)) { | |
2911 | delta -= frag_len; | |
2912 | continue; | |
2913 | } | |
2914 | ||
2915 | dma = skb_frag_dma_map(tx_ring->dev, frag, delta, | |
2916 | frag_len - delta, DMA_TO_DEVICE); | |
2917 | if (unlikely(dma_mapping_error(tx_ring->dev, dma))) | |
1738cd3e NB |
2918 | goto error_report_dma_error; |
2919 | ||
2920 | ena_buf->paddr = dma; | |
38005ca8 | 2921 | ena_buf->len = frag_len - delta; |
1738cd3e | 2922 | ena_buf++; |
38005ca8 AK |
2923 | tx_info->num_of_bufs++; |
2924 | delta = 0; | |
1738cd3e NB |
2925 | } |
2926 | ||
38005ca8 AK |
2927 | return 0; |
2928 | ||
2929 | error_report_dma_error: | |
2930 | u64_stats_update_begin(&tx_ring->syncp); | |
2931 | tx_ring->tx_stats.dma_mapping_err++; | |
2932 | u64_stats_update_end(&tx_ring->syncp); | |
bf2746e8 | 2933 | netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); |
38005ca8 AK |
2934 | |
2935 | tx_info->skb = NULL; | |
2936 | ||
2937 | tx_info->num_of_bufs += i; | |
548c4940 | 2938 | ena_unmap_tx_buff(tx_ring, tx_info); |
38005ca8 AK |
2939 | |
2940 | return -EINVAL; | |
2941 | } | |
2942 | ||
2943 | /* Called with netif_tx_lock. */ | |
2944 | static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
2945 | { | |
2946 | struct ena_adapter *adapter = netdev_priv(dev); | |
2947 | struct ena_tx_buffer *tx_info; | |
2948 | struct ena_com_tx_ctx ena_tx_ctx; | |
2949 | struct ena_ring *tx_ring; | |
2950 | struct netdev_queue *txq; | |
2951 | void *push_hdr; | |
2952 | u16 next_to_use, req_id, header_len; | |
548c4940 | 2953 | int qid, rc; |
38005ca8 AK |
2954 | |
2955 | netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb); | |
2956 | /* Determine which tx ring we will be placed on */ | |
2957 | qid = skb_get_queue_mapping(skb); | |
2958 | tx_ring = &adapter->tx_ring[qid]; | |
2959 | txq = netdev_get_tx_queue(dev, qid); | |
2960 | ||
2961 | rc = ena_check_and_linearize_skb(tx_ring, skb); | |
2962 | if (unlikely(rc)) | |
2963 | goto error_drop_packet; | |
2964 | ||
2965 | skb_tx_timestamp(skb); | |
2966 | ||
2967 | next_to_use = tx_ring->next_to_use; | |
f9172498 | 2968 | req_id = tx_ring->free_ids[next_to_use]; |
38005ca8 AK |
2969 | tx_info = &tx_ring->tx_buffer_info[req_id]; |
2970 | tx_info->num_of_bufs = 0; | |
2971 | ||
2972 | WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); | |
2973 | ||
2974 | rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); | |
2975 | if (unlikely(rc)) | |
2976 | goto error_drop_packet; | |
1738cd3e NB |
2977 | |
2978 | memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx)); | |
2979 | ena_tx_ctx.ena_bufs = tx_info->bufs; | |
2980 | ena_tx_ctx.push_header = push_hdr; | |
2981 | ena_tx_ctx.num_bufs = tx_info->num_of_bufs; | |
2982 | ena_tx_ctx.req_id = req_id; | |
2983 | ena_tx_ctx.header_len = header_len; | |
2984 | ||
2985 | /* set flags and meta data */ | |
0e3a3f6d | 2986 | ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); |
1738cd3e | 2987 | |
548c4940 SJ |
2988 | rc = ena_xmit_common(dev, |
2989 | tx_ring, | |
2990 | tx_info, | |
2991 | &ena_tx_ctx, | |
2992 | next_to_use, | |
2993 | skb->len); | |
2994 | if (rc) | |
1738cd3e | 2995 | goto error_unmap_dma; |
1738cd3e NB |
2996 | |
2997 | netdev_tx_sent_queue(txq, skb->len); | |
2998 | ||
1738cd3e NB |
2999 | /* stop the queue when no more space is available; the packet can need up | |
3000 | * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header | |
3001 | * (if the header is larger than tx_max_header_size). | |
3002 | */ | |
689b2bda AK |
3003 | if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
3004 | tx_ring->sgl_size + 2))) { | |
1738cd3e NB |
3005 | netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n", |
3006 | __func__, qid); | |
3007 | ||
3008 | netif_tx_stop_queue(txq); | |
3009 | u64_stats_update_begin(&tx_ring->syncp); | |
3010 | tx_ring->tx_stats.queue_stop++; | |
3011 | u64_stats_update_end(&tx_ring->syncp); | |
3012 | ||
3013 | /* There is a rare race where this function decides to
3014 | * stop the queue, but meanwhile clean_tx_irq updates
3015 | * next_to_completion and terminates.
3016 | * The queue would then remain stopped forever.
37dff155 NB |
3017 | * To close the race, add an smp_mb() to make sure that the
3018 | * netif_tx_stop_queue() write is visible before checking
3019 | * whether there is additional space in the queue.
1738cd3e | 3020 | */ |
37dff155 | 3021 | smp_mb(); |
1738cd3e | 3022 | |
689b2bda AK |
3023 | if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, |
3024 | ENA_TX_WAKEUP_THRESH)) { | |
1738cd3e NB |
3025 | netif_tx_wake_queue(txq); |
3026 | u64_stats_update_begin(&tx_ring->syncp); | |
3027 | tx_ring->tx_stats.queue_wakeup++; | |
3028 | u64_stats_update_end(&tx_ring->syncp); | |
3029 | } | |
3030 | } | |
3031 | ||
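	/* Doorbells are batched: ring only when the stack reports no further
	 * pending packets (netdev_xmit_more()) or the queue was just stopped,
	 * saving MMIO writes on bursts.
	 */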
6b16f9ee | 3032 | if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { |
37dff155 NB |
3033 | /* trigger the dma engine. ena_com_write_sq_doorbell() |
3034 | * has a mb | |
3035 | */ | |
3036 | ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); | |
1738cd3e NB |
3037 | u64_stats_update_begin(&tx_ring->syncp); |
3038 | tx_ring->tx_stats.doorbells++; | |
3039 | u64_stats_update_end(&tx_ring->syncp); | |
3040 | } | |
3041 | ||
3042 | return NETDEV_TX_OK; | |
3043 | ||
1738cd3e | 3044 | error_unmap_dma: |
548c4940 | 3045 | ena_unmap_tx_buff(tx_ring, tx_info); |
38005ca8 | 3046 | tx_info->skb = NULL; |
1738cd3e NB |
3047 | |
3048 | error_drop_packet: | |
1738cd3e NB |
3049 | dev_kfree_skb(skb); |
3050 | return NETDEV_TX_OK; | |
3051 | } | |
3052 | ||
1738cd3e | 3053 | static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 3054 | struct net_device *sb_dev) |
1738cd3e NB |
3055 | { |
3056 | u16 qid; | |
3057 | /* We suspect that this is good for in-kernel network services that
3058 | * want to loop an incoming skb's rx queue back to tx; with normal
3059 | * user-generated traffic we will most probably never get here.
3060 | */ | |
3061 | if (skb_rx_queue_recorded(skb)) | |
3062 | qid = skb_get_rx_queue(skb); | |
3063 | else | |
a350ecce | 3064 | qid = netdev_pick_tx(dev, skb, NULL); |
1738cd3e NB |
3065 | |
3066 | return qid; | |
3067 | } | |
3068 | ||
46143e58 | 3069 | static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
1738cd3e | 3070 | { |
f0525298 | 3071 | struct device *dev = &pdev->dev; |
1738cd3e NB |
3072 | struct ena_admin_host_info *host_info; |
3073 | int rc; | |
3074 | ||
3075 | /* Allocate only the host info */ | |
3076 | rc = ena_com_allocate_host_info(ena_dev); | |
3077 | if (rc) { | |
f0525298 | 3078 | dev_err(dev, "Cannot allocate host info\n"); |
1738cd3e NB |
3079 | return; |
3080 | } | |
3081 | ||
3082 | host_info = ena_dev->host_attr.host_info; | |
3083 | ||
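	/* BDF encoding: bus number in bits 15:8, device/function (devfn) in
	 * bits 7:0.
	 */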
095f2f1f | 3084 | host_info->bdf = (pdev->bus->number << 8) | pdev->devfn; |
1738cd3e NB |
3085 | host_info->os_type = ENA_ADMIN_OS_LINUX; |
3086 | host_info->kernel_ver = LINUX_VERSION_CODE; | |
f9133088 | 3087 | strlcpy(host_info->kernel_ver_str, utsname()->version, |
1738cd3e NB |
3088 | sizeof(host_info->kernel_ver_str) - 1); |
3089 | host_info->os_dist = 0; | |
3090 | strncpy(host_info->os_dist_str, utsname()->release, | |
3091 | sizeof(host_info->os_dist_str) - 1); | |
92040c6d AK |
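	/* One version component per field: major in the low bits, minor and
	 * sub-minor at their ENA_ADMIN_HOST_INFO_*_SHIFT offsets, and 'K'
	 * marking an in-kernel driver module.
	 */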
3092 | host_info->driver_version = |
3093 | (DRV_MODULE_GEN_MAJOR) | | |
3094 | (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | | |
3095 | (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) | | |
3096 | ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT); | |
095f2f1f | 3097 | host_info->num_cpus = num_online_cpus(); |
1738cd3e | 3098 | |
bd21b0cc | 3099 | host_info->driver_supported_features = |
68f236df | 3100 | ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | |
0f505c60 | 3101 | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | |
0ee60edf AK |
3102 | ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | |
3103 | ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; | |
bd21b0cc | 3104 | |
1738cd3e NB |
3105 | rc = ena_com_set_host_attributes(ena_dev); |
3106 | if (rc) { | |
d1497638 | 3107 | if (rc == -EOPNOTSUPP) |
f0525298 | 3108 | dev_warn(dev, "Cannot set host attributes\n"); |
1738cd3e | 3109 | else |
f0525298 | 3110 | dev_err(dev, "Cannot set host attributes\n"); |
1738cd3e NB |
3111 | |
3112 | goto err; | |
3113 | } | |
3114 | ||
3115 | return; | |
3116 | ||
3117 | err: | |
3118 | ena_com_delete_host_info(ena_dev); | |
3119 | } | |
3120 | ||
3121 | static void ena_config_debug_area(struct ena_adapter *adapter) | |
3122 | { | |
3123 | u32 debug_area_size; | |
3124 | int rc, ss_count; | |
3125 | ||
3126 | ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); | |
3127 | if (ss_count <= 0) { | |
3128 | netif_err(adapter, drv, adapter->netdev, | |
3129 | "SS count is zero or negative\n");
3130 | return; | |
3131 | } | |
3132 | ||
3133 | /* Allocate 32 bytes (ETH_GSTRING_LEN) for each string and 64 bits for each value */
3134 | debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; | |
3135 | ||
3136 | rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); | |
3137 | if (rc) { | |
f0525298 SA |
3138 | netif_err(adapter, drv, adapter->netdev, |
3139 | "Cannot allocate debug area\n"); | |
1738cd3e NB |
3140 | return; |
3141 | } | |
3142 | ||
3143 | rc = ena_com_set_host_attributes(adapter->ena_dev); | |
3144 | if (rc) { | |
d1497638 | 3145 | if (rc == -EOPNOTSUPP) |
1738cd3e NB |
3146 | netif_warn(adapter, drv, adapter->netdev, |
3147 | "Cannot set host attributes\n"); | |
3148 | else | |
3149 | netif_err(adapter, drv, adapter->netdev, | |
3150 | "Cannot set host attributes\n"); | |
3151 | goto err; | |
3152 | } | |
3153 | ||
3154 | return; | |
3155 | err: | |
3156 | ena_com_delete_debug_area(adapter->ena_dev); | |
3157 | } | |
3158 | ||
713865da SJ |
3159 | int ena_update_hw_stats(struct ena_adapter *adapter) |
3160 | { | |
3161 | int rc = 0; | |
3162 | ||
3163 | rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); | |
3164 | if (rc) { | |
3165 | dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n"); | |
3166 | return rc; | |
3167 | } | |
3168 | ||
3169 | return 0; | |
3170 | } | |
3171 | ||
bc1f4470 | 3172 | static void ena_get_stats64(struct net_device *netdev, |
3173 | struct rtnl_link_stats64 *stats) | |
1738cd3e NB |
3174 | { |
3175 | struct ena_adapter *adapter = netdev_priv(netdev); | |
d81db240 NB |
3176 | struct ena_ring *rx_ring, *tx_ring; |
3177 | unsigned int start; | |
3178 | u64 rx_drops; | |
5c665f8c | 3179 | u64 tx_drops; |
d81db240 | 3180 | int i; |
1738cd3e NB |
3181 | |
3182 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
bc1f4470 | 3183 | return; |
1738cd3e | 3184 | |
faa615f9 | 3185 | for (i = 0; i < adapter->num_io_queues; i++) { |
d81db240 NB |
3186 | u64 bytes, packets; |
3187 | ||
3188 | tx_ring = &adapter->tx_ring[i]; | |
1738cd3e | 3189 | |
d81db240 NB |
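		/* Snapshot the counters; u64_stats_fetch_retry_irq() detects a
		 * concurrent writer and forces a re-read, keeping 64-bit
		 * counters consistent on 32-bit hosts.
		 */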
3190 | do { |
3191 | start = u64_stats_fetch_begin_irq(&tx_ring->syncp); | |
3192 | packets = tx_ring->tx_stats.cnt; | |
3193 | bytes = tx_ring->tx_stats.bytes; | |
3194 | } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); | |
1738cd3e | 3195 | |
d81db240 NB |
3196 | stats->tx_packets += packets; |
3197 | stats->tx_bytes += bytes; | |
3198 | ||
3199 | rx_ring = &adapter->rx_ring[i]; | |
3200 | ||
3201 | do { | |
3202 | start = u64_stats_fetch_begin_irq(&rx_ring->syncp); | |
3203 | packets = rx_ring->rx_stats.cnt; | |
3204 | bytes = rx_ring->rx_stats.bytes; | |
3205 | } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); | |
3206 | ||
3207 | stats->rx_packets += packets; | |
3208 | stats->rx_bytes += bytes; | |
3209 | } | |
3210 | ||
3211 | do { | |
3212 | start = u64_stats_fetch_begin_irq(&adapter->syncp); | |
3213 | rx_drops = adapter->dev_stats.rx_drops; | |
5c665f8c | 3214 | tx_drops = adapter->dev_stats.tx_drops; |
d81db240 | 3215 | } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); |
1738cd3e | 3216 | |
d81db240 | 3217 | stats->rx_dropped = rx_drops; |
5c665f8c | 3218 | stats->tx_dropped = tx_drops; |
1738cd3e NB |
3219 | |
3220 | stats->multicast = 0; | |
3221 | stats->collisions = 0; | |
3222 | ||
3223 | stats->rx_length_errors = 0; | |
3224 | stats->rx_crc_errors = 0; | |
3225 | stats->rx_frame_errors = 0; | |
3226 | stats->rx_fifo_errors = 0; | |
3227 | stats->rx_missed_errors = 0; | |
3228 | stats->tx_window_errors = 0; | |
3229 | ||
3230 | stats->rx_errors = 0; | |
3231 | stats->tx_errors = 0; | |
1738cd3e NB |
3232 | } |
3233 | ||
3234 | static const struct net_device_ops ena_netdev_ops = { | |
3235 | .ndo_open = ena_open, | |
3236 | .ndo_stop = ena_close, | |
3237 | .ndo_start_xmit = ena_start_xmit, | |
3238 | .ndo_select_queue = ena_select_queue, | |
3239 | .ndo_get_stats64 = ena_get_stats64, | |
3240 | .ndo_tx_timeout = ena_tx_timeout, | |
3241 | .ndo_change_mtu = ena_change_mtu, | |
3242 | .ndo_set_mac_address = NULL, | |
3243 | .ndo_validate_addr = eth_validate_addr, | |
838c93dc | 3244 | .ndo_bpf = ena_xdp, |
1738cd3e NB |
3245 | }; |
3246 | ||
1738cd3e NB |
3247 | static int ena_device_validate_params(struct ena_adapter *adapter, |
3248 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
3249 | { | |
3250 | struct net_device *netdev = adapter->netdev; | |
3251 | int rc; | |
3252 | ||
3253 | rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, | |
3254 | adapter->mac_addr); | |
3255 | if (!rc) { | |
3256 | netif_err(adapter, drv, netdev, | |
3257 | "Error, mac addresses are different\n");
3258 | return -EINVAL; | |
3259 | } | |
3260 | ||
1738cd3e NB |
3261 | if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { |
3262 | netif_err(adapter, drv, netdev, | |
3263 | "Error, device max mtu is smaller than netdev MTU\n"); | |
3264 | return -EINVAL; | |
3265 | } | |
3266 | ||
3267 | return 0; | |
3268 | } | |
3269 | ||
c29efeae AK |
3270 | static void set_default_llq_configurations(struct ena_llq_configurations *llq_config) |
3271 | { | |
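	/* Defaults: headers are pushed inline into the LLQ, multiple
	 * descriptors are packed per entry, two descriptors precede the
	 * header, and ring entries are 128 bytes.
	 */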
3272 | llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; | |
3273 | llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; | |
3274 | llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; | |
3275 | llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; | |
3276 | llq_config->llq_ring_entry_size_value = 128; | |
3277 | } | |
3278 | ||
3279 | static int ena_set_queues_placement_policy(struct pci_dev *pdev, | |
3280 | struct ena_com_dev *ena_dev, | |
3281 | struct ena_admin_feature_llq_desc *llq, | |
3282 | struct ena_llq_configurations *llq_default_configurations) | |
3283 | { | |
3284 | int rc; | |
3285 | u32 llq_feature_mask; | |
3286 | ||
3287 | llq_feature_mask = 1 << ENA_ADMIN_LLQ; | |
3288 | if (!(ena_dev->supported_features & llq_feature_mask)) { | |
3289 | dev_err(&pdev->dev, | |
3290 | "LLQ is not supported. Falling back to host mode policy.\n");
3291 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3292 | return 0; | |
3293 | } | |
3294 | ||
3295 | rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); | |
3296 | if (unlikely(rc)) { | |
3297 | dev_err(&pdev->dev, | |
3298 | "Failed to configure the device mode. Falling back to host mode policy.\n");
3299 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3300 | } | |
3301 | ||
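	/* Return 0 even on failure: LLQ is an optimization, and the fallback
	 * to host-memory queues keeps the device usable.
	 */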
3302 | return 0; | |
3303 | } | |
3304 | ||
3305 | static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, | |
3306 | int bars) | |
3307 | { | |
3308 | bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); | |
3309 | ||
3310 | if (!has_mem_bar) { | |
3311 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { | |
3312 | dev_err(&pdev->dev, | |
3313 | "ENA device does not expose LLQ bar. Falling back to host mode policy.\n");
3314 | ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; | |
3315 | } | |
3316 | ||
3317 | return 0; | |
3318 | } | |
3319 | ||
3320 | ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, | |
3321 | pci_resource_start(pdev, ENA_MEM_BAR), | |
3322 | pci_resource_len(pdev, ENA_MEM_BAR)); | |
3323 | ||
3324 | if (!ena_dev->mem_bar) | |
3325 | return -EFAULT; | |
3326 | ||
3327 | return 0; | |
3328 | } | |
3329 | ||
1738cd3e NB |
3330 | static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, |
3331 | struct ena_com_dev_get_features_ctx *get_feat_ctx, | |
3332 | bool *wd_state) | |
3333 | { | |
c29efeae | 3334 | struct ena_llq_configurations llq_config; |
1738cd3e NB |
3335 | struct device *dev = &pdev->dev; |
3336 | bool readless_supported; | |
3337 | u32 aenq_groups; | |
3338 | int dma_width; | |
3339 | int rc; | |
3340 | ||
3341 | rc = ena_com_mmio_reg_read_request_init(ena_dev); | |
3342 | if (rc) { | |
bf2746e8 | 3343 | dev_err(dev, "Failed to init mmio read less\n"); |
1738cd3e NB |
3344 | return rc; |
3345 | } | |
3346 | ||
3347 | /* The PCIe configuration space revision id indicates whether mmio reg
3348 | * read is disabled | |
3349 | */ | |
3350 | readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); | |
3351 | ena_com_set_mmio_read_mode(ena_dev, readless_supported); | |
3352 | ||
e2eed0e3 | 3353 | rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); |
1738cd3e NB |
3354 | if (rc) { |
3355 | dev_err(dev, "Can not reset device\n"); | |
3356 | goto err_mmio_read_less; | |
3357 | } | |
3358 | ||
3359 | rc = ena_com_validate_version(ena_dev); | |
3360 | if (rc) { | |
bf2746e8 | 3361 | dev_err(dev, "Device version is too low\n"); |
1738cd3e NB |
3362 | goto err_mmio_read_less; |
3363 | } | |
3364 | ||
3365 | dma_width = ena_com_get_dma_width(ena_dev); | |
3366 | if (dma_width < 0) { | |
3367 | dev_err(dev, "Invalid dma width value %d", dma_width); | |
6e22066f | 3368 | rc = dma_width; |
1738cd3e NB |
3369 | goto err_mmio_read_less; |
3370 | } | |
3371 | ||
09323b3b | 3372 | rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width)); |
1738cd3e | 3373 | if (rc) { |
09323b3b | 3374 | dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc); |
1738cd3e NB |
3375 | goto err_mmio_read_less; |
3376 | } | |
3377 | ||
3378 | /* ENA admin level init */ | |
f1e90f6e | 3379 | rc = ena_com_admin_init(ena_dev, &aenq_handlers); |
1738cd3e NB |
3380 | if (rc) { |
3381 | dev_err(dev, | |
3382 | "Can not initialize ena admin queue with device\n"); | |
3383 | goto err_mmio_read_less; | |
3384 | } | |
3385 | ||
3386 | /* To enable the MSI-X interrupts the driver needs to know the number
3387 | * of queues, so it uses polling mode to retrieve this
3388 | * information.
3389 | */ | |
3390 | ena_com_set_admin_polling_mode(ena_dev, true); | |
3391 | ||
095f2f1f | 3392 | ena_config_host_info(ena_dev, pdev); |
dd8427a7 | 3393 | |
1738cd3e NB |
3394 | /* Get Device Attributes */
3395 | rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); | |
3396 | if (rc) { | |
3397 | dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc); | |
3398 | goto err_admin_init; | |
3399 | } | |
3400 | ||
3401 | /* Try to turn on all the available aenq groups */
3402 | aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | | |
3403 | BIT(ENA_ADMIN_FATAL_ERROR) | | |
3404 | BIT(ENA_ADMIN_WARNING) | | |
3405 | BIT(ENA_ADMIN_NOTIFICATION) | | |
3406 | BIT(ENA_ADMIN_KEEP_ALIVE); | |
3407 | ||
3408 | aenq_groups &= get_feat_ctx->aenq.supported_groups; | |
3409 | ||
3410 | rc = ena_com_set_aenq_config(ena_dev, aenq_groups); | |
3411 | if (rc) { | |
3412 | dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc); | |
3413 | goto err_admin_init; | |
3414 | } | |
3415 | ||
3416 | *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); | |
3417 | ||
c29efeae AK |
3418 | set_default_llq_configurations(&llq_config); |
3419 | ||
3420 | rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, | |
3421 | &llq_config); | |
3422 | if (rc) { | |
bf2746e8 | 3423 | dev_err(dev, "ENA device init failed\n"); |
c29efeae AK |
3424 | goto err_admin_init; |
3425 | } | |
3426 | ||
1738cd3e NB |
3427 | return 0; |
3428 | ||
3429 | err_admin_init: | |
dd8427a7 | 3430 | ena_com_delete_host_info(ena_dev); |
1738cd3e NB |
3431 | ena_com_admin_destroy(ena_dev); |
3432 | err_mmio_read_less: | |
3433 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
3434 | ||
3435 | return rc; | |
3436 | } | |
3437 | ||
4d192660 | 3438 | static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter) |
1738cd3e NB |
3439 | { |
3440 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3441 | struct device *dev = &adapter->pdev->dev; | |
3442 | int rc; | |
3443 | ||
4d192660 | 3444 | rc = ena_enable_msix(adapter); |
1738cd3e NB |
3445 | if (rc) { |
3446 | dev_err(dev, "Can not reserve msix vectors\n"); | |
3447 | return rc; | |
3448 | } | |
3449 | ||
3450 | ena_setup_mgmnt_intr(adapter); | |
3451 | ||
3452 | rc = ena_request_mgmnt_irq(adapter); | |
3453 | if (rc) { | |
3454 | dev_err(dev, "Can not setup management interrupts\n"); | |
3455 | goto err_disable_msix; | |
3456 | } | |
3457 | ||
3458 | ena_com_set_admin_polling_mode(ena_dev, false); | |
3459 | ||
3460 | ena_com_admin_aenq_enable(ena_dev); | |
3461 | ||
3462 | return 0; | |
3463 | ||
3464 | err_disable_msix: | |
06443684 NB |
3465 | ena_disable_msix(adapter); |
3466 | ||
1738cd3e NB |
3467 | return rc; |
3468 | } | |
3469 | ||
cfa324a5 | 3470 | static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) |
1738cd3e | 3471 | { |
1738cd3e NB |
3472 | struct net_device *netdev = adapter->netdev; |
3473 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
8c5c7abd | 3474 | bool dev_up; |
3f6159db | 3475 | |
fe870c77 NB |
3476 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) |
3477 | return; | |
3478 | ||
3f6159db NB |
3479 | netif_carrier_off(netdev); |
3480 | ||
1738cd3e NB |
3481 | del_timer_sync(&adapter->timer_service); |
3482 | ||
1738cd3e | 3483 | dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); |
8c5c7abd | 3484 | adapter->dev_up_before_reset = dev_up; |
cfa324a5 NB |
3485 | if (!graceful) |
3486 | ena_com_set_admin_running_state(ena_dev, false); | |
1738cd3e | 3487 | |
ee4552aa NB |
3488 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
3489 | ena_down(adapter); | |
1738cd3e | 3490 | |
bd791175 | 3491 | /* Stop the device from sending AENQ events (if the reset flag is set
58a54b9c | 3492 | * and the device is up, ena_down() has already reset the device).
8c5c7abd NB |
3493 | */ |
3494 | if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) | |
3495 | ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); | |
3496 | ||
1738cd3e NB |
3497 | ena_free_mgmnt_irq(adapter); |
3498 | ||
06443684 | 3499 | ena_disable_msix(adapter); |
1738cd3e NB |
3500 | |
3501 | ena_com_abort_admin_commands(ena_dev); | |
3502 | ||
3503 | ena_com_wait_for_abort_completion(ena_dev); | |
3504 | ||
3505 | ena_com_admin_destroy(ena_dev); | |
3506 | ||
3507 | ena_com_mmio_reg_read_request_destroy(ena_dev); | |
3508 | ||
c1c0e40b | 3509 | /* return reset reason to default value */ |
e2eed0e3 | 3510 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
8c5c7abd | 3511 | |
3f6159db | 3512 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
fe870c77 | 3513 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
8c5c7abd | 3514 | } |
3f6159db | 3515 | |
8c5c7abd NB |
3516 | static int ena_restore_device(struct ena_adapter *adapter) |
3517 | { | |
3518 | struct ena_com_dev_get_features_ctx get_feat_ctx; | |
3519 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3520 | struct pci_dev *pdev = adapter->pdev; | |
3521 | bool wd_state; | |
3522 | int rc; | |
1738cd3e | 3523 | |
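	/* ENA_FLAG_ONGOING_RESET stays set until the restore finishes so
	 * concurrent flows can tell the admin queue is being rebuilt.
	 */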
d18e4f68 | 3524 | set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
1738cd3e NB |
3525 | rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); |
3526 | if (rc) { | |
3527 | dev_err(&pdev->dev, "Can not initialize device\n"); | |
3528 | goto err; | |
3529 | } | |
3530 | adapter->wd_state = wd_state; | |
3531 | ||
3532 | rc = ena_device_validate_params(adapter, &get_feat_ctx); | |
3533 | if (rc) { | |
3534 | dev_err(&pdev->dev, "Validation of device parameters failed\n"); | |
3535 | goto err_device_destroy; | |
3536 | } | |
3537 | ||
4d192660 | 3538 | rc = ena_enable_msix_and_set_admin_interrupts(adapter); |
1738cd3e NB |
3539 | if (rc) { |
3540 | dev_err(&pdev->dev, "Enable MSI-X failed\n"); | |
3541 | goto err_device_destroy; | |
3542 | } | |
3543 | /* If the interface was up before the reset bring it up */ | |
8c5c7abd | 3544 | if (adapter->dev_up_before_reset) { |
1738cd3e NB |
3545 | rc = ena_up(adapter); |
3546 | if (rc) { | |
3547 | dev_err(&pdev->dev, "Failed to create I/O queues\n"); | |
3548 | goto err_disable_msix; | |
3549 | } | |
3550 | } | |
3551 | ||
fe870c77 | 3552 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
e1f1bd9b AK |
3553 | |
3554 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | |
3555 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | |
3556 | netif_carrier_on(adapter->netdev); | |
3557 | ||
1738cd3e | 3558 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
dfdde134 | 3559 | adapter->last_keep_alive_jiffies = jiffies; |
1738cd3e | 3560 | |
f0525298 SA |
3561 | dev_err(&pdev->dev, "Device reset completed successfully\n"); |
3562 | ||
8c5c7abd | 3563 | return rc; |
1738cd3e NB |
3564 | err_disable_msix: |
3565 | ena_free_mgmnt_irq(adapter); | |
06443684 | 3566 | ena_disable_msix(adapter); |
1738cd3e | 3567 | err_device_destroy: |
d7703ddb AK |
3568 | ena_com_abort_admin_commands(ena_dev); |
3569 | ena_com_wait_for_abort_completion(ena_dev); | |
1738cd3e | 3570 | ena_com_admin_destroy(ena_dev); |
d7703ddb | 3571 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); |
e76ad21d | 3572 | ena_com_mmio_reg_read_request_destroy(ena_dev); |
1738cd3e | 3573 | err: |
22b331c9 | 3574 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
d18e4f68 | 3575 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
1738cd3e NB |
3576 | dev_err(&pdev->dev, |
3577 | "Reset attempt failed. Can not reset the device\n"); | |
8c5c7abd NB |
3578 | |
3579 | return rc; | |
3580 | } | |
3581 | ||
3582 | static void ena_fw_reset_device(struct work_struct *work) | |
3583 | { | |
3584 | struct ena_adapter *adapter = | |
3585 | container_of(work, struct ena_adapter, reset_task); | |
8c5c7abd | 3586 | |
8c5c7abd | 3587 | rtnl_lock(); |
63d4a4c1 SA |
3588 | |
3589 | if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
3590 | ena_destroy_device(adapter, false); | |
3591 | ena_restore_device(adapter); | |
3592 | } | |
3593 | ||
8c5c7abd | 3594 | rtnl_unlock(); |
1738cd3e NB |
3595 | } |
3596 | ||
8510e1a3 NB |
3597 | static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, |
3598 | struct ena_ring *rx_ring) | |
3599 | { | |
3600 | if (likely(rx_ring->first_interrupt)) | |
3601 | return 0; | |
3602 | ||
3603 | if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) | |
3604 | return 0; | |
3605 | ||
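	/* The CQ has entries but this queue has never seen an interrupt:
	 * count a strike, and trigger a reset after
	 * ENA_MAX_NO_INTERRUPT_ITERATIONS consecutive ones.
	 */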
3606 | rx_ring->no_interrupt_event_cnt++; | |
3607 | ||
3608 | if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { | |
3609 | netif_err(adapter, rx_err, adapter->netdev, | |
3610 | "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", | |
3611 | rx_ring->qid); | |
3612 | adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; | |
3613 | smp_mb__before_atomic(); | |
3614 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
3615 | return -EIO; | |
3616 | } | |
3617 | ||
3618 | return 0; | |
3619 | } | |
3620 | ||
3621 | static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, | |
3622 | struct ena_ring *tx_ring) | |
1738cd3e NB |
3623 | { |
3624 | struct ena_tx_buffer *tx_buf; | |
3625 | unsigned long last_jiffies; | |
800c55cb | 3626 | u32 missed_tx = 0; |
11095fdb | 3627 | int i, rc = 0; |
800c55cb NB |
3628 | |
3629 | for (i = 0; i < tx_ring->ring_size; i++) { | |
3630 | tx_buf = &tx_ring->tx_buffer_info[i]; | |
3631 | last_jiffies = tx_buf->last_jiffies; | |
8510e1a3 NB |
3632 | |
3633 | if (last_jiffies == 0) | |
3634 | /* no pending Tx at this location */ | |
3635 | continue; | |
3636 | ||
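		/* Two thresholds: a queue that never fired an interrupt gets
		 * twice the completion timeout before MSI-X is blamed;
		 * otherwise the packet is counted as a missed completion below.
		 */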
3637 | if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + | |
3638 | 2 * adapter->missing_tx_completion_to))) { | |
3639 | /* If after graceful period interrupt is still not | |
3640 | * received, we schedule a reset | |
3641 | */ | |
3642 | netif_err(adapter, tx_err, adapter->netdev, | |
3643 | "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", | |
3644 | tx_ring->qid); | |
3645 | adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; | |
3646 | smp_mb__before_atomic(); | |
3647 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
3648 | return -EIO; | |
3649 | } | |
3650 | ||
3651 | if (unlikely(time_is_before_jiffies(last_jiffies + | |
3652 | adapter->missing_tx_completion_to))) { | |
800c55cb NB |
3653 | if (!tx_buf->print_once) |
3654 | netif_notice(adapter, tx_err, adapter->netdev, | |
3655 | "Found a Tx that wasn't completed on time, qid %d, index %d.\n", | |
3656 | tx_ring->qid, i); | |
3657 | ||
3658 | tx_buf->print_once = 1; | |
3659 | missed_tx++; | |
800c55cb NB |
3660 | } |
3661 | } | |
3662 | ||
11095fdb NB |
3663 | if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { |
3664 | netif_err(adapter, tx_err, adapter->netdev, | |
3665 | "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", | |
3666 | missed_tx, | |
3667 | adapter->missing_tx_completion_threshold); | |
3668 | adapter->reset_reason = | |
3669 | ENA_REGS_RESET_MISS_TX_CMPL; | |
3670 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | |
3671 | rc = -EIO; | |
3672 | } | |
3673 | ||
3674 | u64_stats_update_begin(&tx_ring->syncp); | |
ccd143e5 | 3675 | tx_ring->tx_stats.missed_tx += missed_tx; |
11095fdb NB |
3676 | u64_stats_update_end(&tx_ring->syncp); |
3677 | ||
3678 | return rc; | |
800c55cb NB |
3679 | } |
3680 | ||
8510e1a3 | 3681 | static void check_for_missing_completions(struct ena_adapter *adapter) |
800c55cb | 3682 | { |
1738cd3e | 3683 | struct ena_ring *tx_ring; |
8510e1a3 | 3684 | struct ena_ring *rx_ring; |
800c55cb | 3685 | int i, budget, rc; |
548c4940 | 3686 | int io_queue_count; |
1738cd3e | 3687 | |
548c4940 | 3688 | io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; |
1738cd3e NB |
3689 | /* Make sure the driver doesn't race with another process turning the device on or off */
3690 | smp_rmb(); | |
3691 | ||
3692 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
3693 | return; | |
3694 | ||
3f6159db NB |
3695 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) |
3696 | return; | |
3697 | ||
82ef30f1 NB |
3698 | if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) |
3699 | return; | |
3700 | ||
1738cd3e NB |
3701 | budget = ENA_MONITORED_TX_QUEUES; |
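	/* Check at most ENA_MONITORED_TX_QUEUES rings per timer tick,
	 * resuming from last_monitored_tx_qid so every ring is eventually
	 * scanned.
	 */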
3702 | ||
548c4940 | 3703 | for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { |
1738cd3e | 3704 | tx_ring = &adapter->tx_ring[i]; |
8510e1a3 NB |
3705 | rx_ring = &adapter->rx_ring[i]; |
3706 | ||
3707 | rc = check_missing_comp_in_tx_queue(adapter, tx_ring); | |
3708 | if (unlikely(rc)) | |
3709 | return; | |
1738cd3e | 3710 | |
548c4940 SJ |
3711 | rc = !ENA_IS_XDP_INDEX(adapter, i) ? |
3712 | check_for_rx_interrupt_queue(adapter, rx_ring) : 0; | |
800c55cb NB |
3713 | if (unlikely(rc)) |
3714 | return; | |
1738cd3e NB |
3715 | |
3716 | budget--; | |
3717 | if (!budget) | |
3718 | break; | |
3719 | } | |
3720 | ||
548c4940 | 3721 | adapter->last_monitored_tx_qid = i % io_queue_count; |
1738cd3e NB |
3722 | } |
3723 | ||
a3af7c18 NB |
3724 | /* trigger napi schedule after 2 consecutive detections */ |
3725 | #define EMPTY_RX_REFILL 2 | |
3726 | /* For the rare case where the device runs out of Rx descriptors and the | |
3727 | * napi handler failed to refill new Rx descriptors (due to a lack of memory | |
3728 | * for example). | |
3729 | * This case will lead to a deadlock: | |
3730 | * The device won't send interrupts since all the new Rx packets will be dropped | |
3731 | * The napi handler won't allocate new Rx descriptors, so the device won't
3732 | * be able to pass new Rx packets to the host.
3733 | *
3734 | * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3735 | * It is recommended to have at least 512MB, with a minimum of 128MB for
3736 | * a constrained environment.
3737 | * | |
3738 | * When such a situation is detected - Reschedule napi | |
3739 | */ | |
3740 | static void check_for_empty_rx_ring(struct ena_adapter *adapter) | |
3741 | { | |
3742 | struct ena_ring *rx_ring; | |
3743 | int i, refill_required; | |
3744 | ||
3745 | if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | |
3746 | return; | |
3747 | ||
3748 | if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) | |
3749 | return; | |
3750 | ||
faa615f9 | 3751 | for (i = 0; i < adapter->num_io_queues; i++) { |
a3af7c18 NB |
3752 | rx_ring = &adapter->rx_ring[i]; |
3753 | ||
7cfe9a55 | 3754 | refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); |
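		/* ring_size - 1 free entries means the device holds no Rx
		 * buffers at all, i.e. the napi refill failed completely.
		 */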
a3af7c18 NB |
3755 | if (unlikely(refill_required == (rx_ring->ring_size - 1))) { |
3756 | rx_ring->empty_rx_queue++; | |
3757 | ||
3758 | if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { | |
3759 | u64_stats_update_begin(&rx_ring->syncp); | |
3760 | rx_ring->rx_stats.empty_rx_ring++; | |
3761 | u64_stats_update_end(&rx_ring->syncp); | |
3762 | ||
3763 | netif_err(adapter, drv, adapter->netdev, | |
bf2746e8 | 3764 | "Trigger refill for ring %d\n", i); |
a3af7c18 NB |
3765 | |
3766 | napi_schedule(rx_ring->napi); | |
3767 | rx_ring->empty_rx_queue = 0; | |
3768 | } | |
3769 | } else { | |
3770 | rx_ring->empty_rx_queue = 0; | |
3771 | } | |
3772 | } | |
3773 | } | |
3774 | ||
1738cd3e NB |
3775 | /* Check for keep alive expiration */ |
3776 | static void check_for_missing_keep_alive(struct ena_adapter *adapter) | |
3777 | { | |
3778 | unsigned long keep_alive_expired; | |
3779 | ||
3780 | if (!adapter->wd_state) | |
3781 | return; | |
3782 | ||
82ef30f1 NB |
3783 | if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) |
3784 | return; | |
3785 | ||
2a6e5fa2 AK |
3786 | keep_alive_expired = adapter->last_keep_alive_jiffies + |
3787 | adapter->keep_alive_timeout; | |
1738cd3e NB |
3788 | if (unlikely(time_is_before_jiffies(keep_alive_expired))) { |
3789 | netif_err(adapter, drv, adapter->netdev, | |
3790 | "Keep alive watchdog timeout.\n"); | |
3791 | u64_stats_update_begin(&adapter->syncp); | |
3792 | adapter->dev_stats.wd_expired++; | |
3793 | u64_stats_update_end(&adapter->syncp); | |
e2eed0e3 | 3794 | adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; |
1738cd3e NB |
3795 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
3796 | } | |
3797 | } | |
3798 | ||
3799 | static void check_for_admin_com_state(struct ena_adapter *adapter) | |
3800 | { | |
3801 | if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { | |
3802 | netif_err(adapter, drv, adapter->netdev, | |
3803 | "ENA admin queue is not in running state!\n"); | |
3804 | u64_stats_update_begin(&adapter->syncp); | |
3805 | adapter->dev_stats.admin_q_pause++; | |
3806 | u64_stats_update_end(&adapter->syncp); | |
e2eed0e3 | 3807 | adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; |
1738cd3e NB |
3808 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
3809 | } | |
3810 | } | |
3811 | ||
82ef30f1 NB |
3812 | static void ena_update_hints(struct ena_adapter *adapter, |
3813 | struct ena_admin_ena_hw_hints *hints) | |
3814 | { | |
3815 | struct net_device *netdev = adapter->netdev; | |
3816 | ||
3817 | if (hints->admin_completion_tx_timeout) | |
3818 | adapter->ena_dev->admin_queue.completion_timeout = | |
3819 | hints->admin_completion_tx_timeout * 1000; | |
3820 | ||
3821 | if (hints->mmio_read_timeout) | |
3822 | /* convert to usec */ | |
3823 | adapter->ena_dev->mmio_read.reg_read_to = | |
3824 | hints->mmio_read_timeout * 1000; | |
3825 | ||
3826 | if (hints->missed_tx_completion_count_threshold_to_reset) | |
3827 | adapter->missing_tx_completion_threshold = | |
3828 | hints->missed_tx_completion_count_threshold_to_reset; | |
3829 | ||
3830 | if (hints->missing_tx_completion_timeout) { | |
3831 | if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) | |
3832 | adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; | |
3833 | else | |
3834 | adapter->missing_tx_completion_to = | |
3835 | msecs_to_jiffies(hints->missing_tx_completion_timeout); | |
3836 | } | |
3837 | ||
3838 | if (hints->netdev_wd_timeout) | |
3839 | netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); | |
3840 | ||
3841 | if (hints->driver_watchdog_timeout) { | |
3842 | if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) | |
3843 | adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; | |
3844 | else | |
3845 | adapter->keep_alive_timeout = | |
3846 | msecs_to_jiffies(hints->driver_watchdog_timeout); | |
3847 | } | |
3848 | } | |
3849 | ||
1738cd3e NB |
3850 | static void ena_update_host_info(struct ena_admin_host_info *host_info, |
3851 | struct net_device *netdev) | |
3852 | { | |
3853 | host_info->supported_network_features[0] = | |
3854 | netdev->features & GENMASK_ULL(31, 0); | |
3855 | host_info->supported_network_features[1] = | |
3856 | (netdev->features & GENMASK_ULL(63, 32)) >> 32; | |
3857 | } | |
3858 | ||
e99e88a9 | 3859 | static void ena_timer_service(struct timer_list *t) |
1738cd3e | 3860 | { |
e99e88a9 | 3861 | struct ena_adapter *adapter = from_timer(adapter, t, timer_service); |
1738cd3e NB |
3862 | u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; |
3863 | struct ena_admin_host_info *host_info = | |
3864 | adapter->ena_dev->host_attr.host_info; | |
3865 | ||
3866 | check_for_missing_keep_alive(adapter); | |
3867 | ||
3868 | check_for_admin_com_state(adapter); | |
3869 | ||
8510e1a3 | 3870 | check_for_missing_completions(adapter); |
1738cd3e | 3871 | |
a3af7c18 NB |
3872 | check_for_empty_rx_ring(adapter); |
3873 | ||
1738cd3e NB |
3874 | if (debug_area) |
3875 | ena_dump_stats_to_buf(adapter, debug_area); | |
3876 | ||
3877 | if (host_info) | |
3878 | ena_update_host_info(host_info, adapter->netdev); | |
3879 | ||
3f6159db | 3880 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { |
1738cd3e NB |
3881 | netif_err(adapter, drv, adapter->netdev, |
3882 | "Trigger reset is on\n"); | |
3883 | ena_dump_stats_to_dmesg(adapter); | |
3884 | queue_work(ena_wq, &adapter->reset_task); | |
3885 | return; | |
3886 | } | |
3887 | ||
3888 | /* Reset the timer */ | |
2a6e5fa2 | 3889 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
1738cd3e NB |
3890 | } |
3891 | ||
ba6f6b41 | 3892 | static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, |
736ce3f4 SJ |
3893 | struct ena_com_dev *ena_dev, |
3894 | struct ena_com_dev_get_features_ctx *get_feat_ctx) | |
1738cd3e | 3895 | { |
ba6f6b41 | 3896 | u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; |
31aa9857 SJ |
3897 | |
3898 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { | |
3899 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = | |
3900 | &get_feat_ctx->max_queue_ext.max_queue_ext; | |
736ce3f4 | 3901 | io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, |
31aa9857 | 3902 | max_queue_ext->max_rx_cq_num); |
1738cd3e | 3903 | |
31aa9857 SJ |
3904 | io_tx_sq_num = max_queue_ext->max_tx_sq_num; |
3905 | io_tx_cq_num = max_queue_ext->max_tx_cq_num; | |
3906 | } else { | |
3907 | struct ena_admin_queue_feature_desc *max_queues = | |
3908 | &get_feat_ctx->max_queues; | |
3909 | io_tx_sq_num = max_queues->max_sq_num; | |
3910 | io_tx_cq_num = max_queues->max_cq_num; | |
736ce3f4 | 3911 | io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num); |
31aa9857 SJ |
3912 | } |
3913 | ||
3914 | /* In case of LLQ use the llq fields for the tx SQ/CQ */ | |
9fd25592 | 3915 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
31aa9857 | 3916 | io_tx_sq_num = get_feat_ctx->llq.max_llq_num; |
1738cd3e | 3917 | |
736ce3f4 SJ |
3918 | max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES); |
3919 | max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num); | |
3920 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num); | |
3921 | max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num); | |
1738cd3e | 3922 | /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
736ce3f4 SJ |
3923 | max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); |
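	/* Example: a 16-CPU host whose device exposes 32 SQs/CQs and 32 MSI-X
	 * vectors ends up with min(16, 32, 32, 32, 31) = 16 IO queues.
	 */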
3924 | if (unlikely(!max_num_io_queues)) { | |
1738cd3e NB |
3925 | dev_err(&pdev->dev, "The device doesn't have io queues\n"); |
3926 | return -EFAULT; | |
3927 | } | |
3928 | ||
736ce3f4 | 3929 | return max_num_io_queues; |
1738cd3e NB |
3930 | } |
3931 | ||
1738cd3e NB |
3932 | static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, |
3933 | struct net_device *netdev) | |
3934 | { | |
3935 | netdev_features_t dev_features = 0; | |
3936 | ||
3937 | /* Set offload features */ | |
3938 | if (feat->offload.tx & | |
3939 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) | |
3940 | dev_features |= NETIF_F_IP_CSUM; | |
3941 | ||
3942 | if (feat->offload.tx & | |
3943 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) | |
3944 | dev_features |= NETIF_F_IPV6_CSUM; | |
3945 | ||
3946 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) | |
3947 | dev_features |= NETIF_F_TSO; | |
3948 | ||
3949 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) | |
3950 | dev_features |= NETIF_F_TSO6; | |
3951 | ||
3952 | if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) | |
3953 | dev_features |= NETIF_F_TSO_ECN; | |
3954 | ||
3955 | if (feat->offload.rx_supported & | |
3956 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) | |
3957 | dev_features |= NETIF_F_RXCSUM; | |
3958 | ||
3959 | if (feat->offload.rx_supported & | |
3960 | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) | |
3961 | dev_features |= NETIF_F_RXCSUM; | |
3962 | ||
3963 | netdev->features = | |
3964 | dev_features | | |
3965 | NETIF_F_SG | | |
1738cd3e NB |
3966 | NETIF_F_RXHASH | |
3967 | NETIF_F_HIGHDMA; | |
3968 | ||
3969 | netdev->hw_features |= netdev->features; | |
3970 | netdev->vlan_features |= netdev->features; | |
3971 | } | |
3972 | ||
3973 | static void ena_set_conf_feat_params(struct ena_adapter *adapter, | |
3974 | struct ena_com_dev_get_features_ctx *feat) | |
3975 | { | |
3976 | struct net_device *netdev = adapter->netdev; | |
3977 | ||
3978 | /* Copy mac address */ | |
3979 | if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { | |
3980 | eth_hw_addr_random(netdev); | |
3981 | ether_addr_copy(adapter->mac_addr, netdev->dev_addr); | |
3982 | } else { | |
3983 | ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); | |
3984 | ether_addr_copy(netdev->dev_addr, adapter->mac_addr); | |
3985 | } | |
3986 | ||
3987 | /* Set offload features */ | |
3988 | ena_set_dev_offloads(feat, netdev); | |
3989 | ||
3990 | adapter->max_mtu = feat->dev_attr.max_mtu; | |
d894be57 JW |
3991 | netdev->max_mtu = adapter->max_mtu; |
3992 | netdev->min_mtu = ENA_MIN_MTU; | |
1738cd3e NB |
3993 | } |
3994 | ||
3995 | static int ena_rss_init_default(struct ena_adapter *adapter) | |
3996 | { | |
3997 | struct ena_com_dev *ena_dev = adapter->ena_dev; | |
3998 | struct device *dev = &adapter->pdev->dev; | |
3999 | int rc, i; | |
4000 | u32 val; | |
4001 | ||
4002 | rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); | |
4003 | if (unlikely(rc)) { | |
4004 | dev_err(dev, "Cannot init indirect table\n"); | |
4005 | goto err_rss_init; | |
4006 | } | |
4007 | ||
4008 | for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { | |
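		/* ethtool_rxfh_indir_default(i, n) is i % n, spreading the
		 * indirection table round-robin over the IO queues.
		 */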
faa615f9 | 4009 | val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); |
1738cd3e NB |
4010 | rc = ena_com_indirect_table_fill_entry(ena_dev, i, |
4011 | ENA_IO_RXQ_IDX(val)); | |
d1497638 | 4012 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
1738cd3e NB |
4013 | dev_err(dev, "Cannot fill indirect table\n"); |
4014 | goto err_fill_indir; | |
4015 | } | |
4016 | } | |
4017 | ||
c1bd17e5 | 4018 | rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, |
1738cd3e | 4019 | ENA_HASH_KEY_SIZE, 0xFFFFFFFF); |
d1497638 | 4020 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
1738cd3e NB |
4021 | dev_err(dev, "Cannot fill hash function\n"); |
4022 | goto err_fill_indir; | |
4023 | } | |
4024 | ||
4025 | rc = ena_com_set_default_hash_ctrl(ena_dev); | |
d1497638 | 4026 | if (unlikely(rc && (rc != -EOPNOTSUPP))) { |
1738cd3e NB |
4027 | dev_err(dev, "Cannot fill hash control\n"); |
4028 | goto err_fill_indir; | |
4029 | } | |
4030 | ||
4031 | return 0; | |
4032 | ||
4033 | err_fill_indir: | |
4034 | ena_com_rss_destroy(ena_dev); | |
4035 | err_rss_init: | |
4036 | ||
4037 | return rc; | |
4038 | } | |
4039 | ||
4040 | static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |
4041 | { | |
d79c3888 | 4042 | int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
0857d92f | 4043 | |
1738cd3e NB |
4044 | pci_release_selected_regions(pdev, release_bars); |
4045 | } | |
4046 | ||
38005ca8 | 4047 | |
4d192660 | 4048 | static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) |
1738cd3e | 4049 | { |
31aa9857 SJ |
4050 | struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; |
4051 | struct ena_com_dev *ena_dev = ctx->ena_dev; | |
4052 | u32 tx_queue_size = ENA_DEFAULT_RING_SIZE; | |
4053 | u32 rx_queue_size = ENA_DEFAULT_RING_SIZE; | |
4054 | u32 max_tx_queue_size; | |
4055 | u32 max_rx_queue_size; | |
1738cd3e | 4056 | |
4d192660 | 4057 | if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
31aa9857 SJ |
4058 | struct ena_admin_queue_ext_feature_fields *max_queue_ext = |
4059 | &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; | |
4060 | max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, | |
4061 | max_queue_ext->max_rx_sq_depth); | |
4062 | max_tx_queue_size = max_queue_ext->max_tx_cq_depth; | |
1738cd3e | 4063 | |
31aa9857 SJ |
4064 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
4065 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
4066 | llq->max_llq_depth); | |
4067 | else | |
4068 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
4069 | max_queue_ext->max_tx_sq_depth); | |
1738cd3e | 4070 | |
31aa9857 SJ |
4071 | ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, |
4072 | max_queue_ext->max_per_packet_tx_descs); | |
4073 | ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
4074 | max_queue_ext->max_per_packet_rx_descs); | |
4075 | } else { | |
4076 | struct ena_admin_queue_feature_desc *max_queues = | |
4077 | &ctx->get_feat_ctx->max_queues; | |
4078 | max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, | |
4079 | max_queues->max_sq_depth); | |
4080 | max_tx_queue_size = max_queues->max_cq_depth; | |
4081 | ||
4082 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) | |
4083 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
4084 | llq->max_llq_depth); | |
4085 | else | |
4086 | max_tx_queue_size = min_t(u32, max_tx_queue_size, | |
4087 | max_queues->max_sq_depth); | |
4088 | ||
4089 | ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
4090 | max_queues->max_packet_tx_descs); | |
4091 | ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, | |
4092 | max_queues->max_packet_rx_descs); | |
4093 | } | |
4094 | ||
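	/* The device requires power-of-2 queue depths; round the maximums
	 * down so producer/consumer indices can wrap with a simple mask.
	 */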
4095 | max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); | |
4096 | max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size); | |
1738cd3e | 4097 | |
13ca32a6 SJ |
4098 | tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE, |
4099 | max_tx_queue_size); | |
4100 | rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE, | |
4101 | max_rx_queue_size); | |
31aa9857 SJ |
4102 | |
4103 | tx_queue_size = rounddown_pow_of_two(tx_queue_size); | |
4104 | rx_queue_size = rounddown_pow_of_two(rx_queue_size); | |
4105 | ||
31aa9857 SJ |
4106 | ctx->max_tx_queue_size = max_tx_queue_size; |
4107 | ctx->max_rx_queue_size = max_rx_queue_size; | |
4108 | ctx->tx_queue_size = tx_queue_size; | |
4109 | ctx->rx_queue_size = rx_queue_size; | |
1738cd3e | 4110 | |
31aa9857 | 4111 | return 0; |
1738cd3e NB |
4112 | } |
4113 | ||
4114 | /* ena_probe - Device Initialization Routine | |
4115 | * @pdev: PCI device information struct | |
4116 | * @ent: entry in ena_pci_tbl | |
4117 | * | |
4118 | * Returns 0 on success, negative on failure | |
4119 | * | |
4120 | * ena_probe initializes an adapter identified by a pci_dev structure. | |
4121 | * The OS initialization, configuring of the adapter private structure, | |
4122 | * and a hardware reset occur. | |
4123 | */ | |
4124 | static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
4125 | { | |
f49ed500 | 4126 | struct ena_calc_queue_size_ctx calc_queue_ctx = {}; |
0a39a35f | 4127 | struct ena_com_dev_get_features_ctx get_feat_ctx; |
1738cd3e | 4128 | struct ena_com_dev *ena_dev = NULL; |
83b92404 | 4129 | struct ena_adapter *adapter; |
83b92404 SJ |
4130 | struct net_device *netdev; |
4131 | static int adapters_found; | |
736ce3f4 | 4132 | u32 max_num_io_queues; |
1738cd3e | 4133 | bool wd_state; |
736ce3f4 | 4134 | int bars, rc; |
1738cd3e NB |
4135 | |
4136 | dev_dbg(&pdev->dev, "%s\n", __func__); | |
4137 | ||
1738cd3e NB |
4138 | rc = pci_enable_device_mem(pdev); |
4139 | if (rc) { | |
4140 | dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); | |
4141 | return rc; | |
4142 | } | |
4143 | ||
09323b3b SA |
4144 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); |
4145 | if (rc) { | |
4146 | dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); | |
4147 | goto err_disable_device; | |
4148 | } | |
4149 | ||
1738cd3e NB |
4150 | pci_set_master(pdev); |
4151 | ||
4152 | ena_dev = vzalloc(sizeof(*ena_dev)); | |
4153 | if (!ena_dev) { | |
4154 | rc = -ENOMEM; | |
4155 | goto err_disable_device; | |
4156 | } | |
4157 | ||
4158 | bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | |
4159 | rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); | |
4160 | if (rc) { | |
4161 | dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", | |
4162 | rc); | |
4163 | goto err_free_ena_dev; | |
4164 | } | |
4165 | ||
0857d92f NB |
4166 | ena_dev->reg_bar = devm_ioremap(&pdev->dev, |
4167 | pci_resource_start(pdev, ENA_REG_BAR), | |
4168 | pci_resource_len(pdev, ENA_REG_BAR)); | |
1738cd3e | 4169 | if (!ena_dev->reg_bar) { |
bf2746e8 | 4170 | dev_err(&pdev->dev, "Failed to remap regs bar\n"); |
1738cd3e NB |
4171 | rc = -EFAULT; |
4172 | goto err_free_region; | |
4173 | } | |
4174 | ||
4bb7f4cf AK |
4175 | ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; |
4176 | ||
1738cd3e NB |
4177 | ena_dev->dmadev = &pdev->dev; |
4178 | ||
4179 | rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); | |
4180 | if (rc) { | |
bf2746e8 | 4181 | dev_err(&pdev->dev, "ENA device init failed\n"); |
1738cd3e NB |
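		/* -ETIME usually means the device didn't answer in time;
		 * defer the probe so the PCI core retries later.
		 */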
4182 | if (rc == -ETIME) |
4183 | rc = -EPROBE_DEFER; | |
4184 | goto err_free_region; | |
4185 | } | |
4186 | ||
c29efeae | 4187 | rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); |
38005ca8 | 4188 | if (rc) { |
bf2746e8 | 4189 | dev_err(&pdev->dev, "ENA llq bar mapping failed\n"); |
c29efeae | 4190 | goto err_free_ena_dev; |
1738cd3e NB |
4191 | } |
4192 | ||
31aa9857 SJ |
4193 | calc_queue_ctx.ena_dev = ena_dev; |
4194 | calc_queue_ctx.get_feat_ctx = &get_feat_ctx; | |
4195 | calc_queue_ctx.pdev = pdev; | |
4196 | ||
13830937 | 4197 | /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. |
4d192660 SJ |
4198 | * Updated during device initialization with the real granularity |
4199 | */ | |
1738cd3e | 4200 | ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; |
15619e72 | 4201 | ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; |
79226cea | 4202 | ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; |
736ce3f4 | 4203 | max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx); |
4d192660 | 4204 | rc = ena_calc_io_queue_size(&calc_queue_ctx); |
736ce3f4 | 4205 | if (rc || !max_num_io_queues) { |
1738cd3e NB |
4206 | rc = -EFAULT; |
4207 | goto err_device_destroy; | |
4208 | } | |
4209 | ||
1738cd3e | 4210 | /* dev zeroed in alloc_etherdev_mq() */
736ce3f4 | 4211 | netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues); |
1738cd3e NB |
4212 | if (!netdev) { |
4213 | dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); | |
4214 | rc = -ENOMEM; | |
4215 | goto err_device_destroy; | |
4216 | } | |
4217 | ||
4218 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
4219 | ||
4220 | adapter = netdev_priv(netdev); | |
4221 | pci_set_drvdata(pdev, adapter); | |
4222 | ||
4223 | adapter->ena_dev = ena_dev; | |
4224 | adapter->netdev = netdev; | |
4225 | adapter->pdev = pdev; | |
4226 | ||
4227 | ena_set_conf_feat_params(adapter, &get_feat_ctx); | |
4228 | ||
4229 | adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); | |
e2eed0e3 | 4230 | adapter->reset_reason = ENA_REGS_RESET_NORMAL; |
1738cd3e | 4231 | |
13ca32a6 SJ |
4232 | adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size; |
4233 | adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size; | |
9f9ae3f9 SJ |
4234 | adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size; |
4235 | adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size; | |
31aa9857 SJ |
4236 | adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; |
4237 | adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; | |
1738cd3e | 4238 | |
736ce3f4 SJ |
4239 | adapter->num_io_queues = max_num_io_queues; |
4240 | adapter->max_num_io_queues = max_num_io_queues; | |
0a39a35f | 4241 | adapter->last_monitored_tx_qid = 0; |
736ce3f4 | 4242 | |
548c4940 SJ |
4243 | adapter->xdp_first_ring = 0; |
4244 | adapter->xdp_num_queues = 0; | |
4245 | ||
1738cd3e | 4246 | adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; |
0e3a3f6d AK |
4247 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) |
4248 | adapter->disable_meta_caching = | |
4249 | !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & | |
4250 | BIT(ENA_ADMIN_DISABLE_META_CACHING)); | |
4251 | ||
1738cd3e NB |
4252 | adapter->wd_state = wd_state; |
4253 | ||
4254 | snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); | |
4255 | ||
4256 | rc = ena_com_init_interrupt_moderation(adapter->ena_dev); | |
4257 | if (rc) { | |
4258 | dev_err(&pdev->dev, | |
4259 | "Failed to query interrupt moderation feature\n"); | |
4260 | goto err_netdev_destroy; | |
4261 | } | |
548c4940 SJ |
4262 | ena_init_io_rings(adapter, |
4263 | 0, | |
4264 | adapter->xdp_num_queues + | |
4265 | adapter->num_io_queues); | |
1738cd3e NB |
4266 | |
4267 | netdev->netdev_ops = &ena_netdev_ops; | |
4268 | netdev->watchdog_timeo = TX_TIMEOUT; | |
4269 | ena_set_ethtool_ops(netdev); | |
4270 | ||
4271 | netdev->priv_flags |= IFF_UNICAST_FLT; | |
4272 | ||
4273 | u64_stats_init(&adapter->syncp); | |
4274 | ||
4d192660 | 4275 | rc = ena_enable_msix_and_set_admin_interrupts(adapter); |
1738cd3e NB |
4276 | if (rc) { |
4277 | dev_err(&pdev->dev, | |
4278 | "Failed to enable and set the admin interrupts\n"); | |
4279 | goto err_worker_destroy; | |
4280 | } | |
4281 | rc = ena_rss_init_default(adapter); | |
d1497638 | 4282 | if (rc && (rc != -EOPNOTSUPP)) { |
1738cd3e NB |
4283 | dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); |
4284 | goto err_free_msix; | |
4285 | } | |
4286 | ||
4287 | ena_config_debug_area(adapter); | |
4288 | ||
713865da SJ |
4289 | if (!ena_update_hw_stats(adapter)) |
4290 | adapter->eni_stats_supported = true; | |
4291 | else | |
4292 | adapter->eni_stats_supported = false; | |
4293 | ||
1738cd3e NB |
4294 | memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); |
4295 | ||
4296 | netif_carrier_off(netdev); | |
4297 | ||
4298 | rc = register_netdev(netdev); | |
4299 | if (rc) { | |
4300 | dev_err(&pdev->dev, "Cannot register net device\n"); | |
4301 | goto err_rss; | |
4302 | } | |
4303 | ||
1738cd3e NB |
4304 | INIT_WORK(&adapter->reset_task, ena_fw_reset_device); |
4305 | ||
4306 | adapter->last_keep_alive_jiffies = jiffies; | |
82ef30f1 NB |
4307 | adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; |
4308 | adapter->missing_tx_completion_to = TX_TIMEOUT; | |
4309 | adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; | |
4310 | ||
4311 | ena_update_hints(adapter, &get_feat_ctx.hw_hints); | |
1738cd3e | 4312 | |
e99e88a9 | 4313 | timer_setup(&adapter->timer_service, ena_timer_service, 0); |
f850b4a7 | 4314 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
1738cd3e | 4315 | |
38005ca8 | 4316 | dev_info(&pdev->dev, |
a8aea849 | 4317 | "%s found at mem %lx, mac addr %pM\n", |
1738cd3e | 4318 | DEVICE_NAME, (long)pci_resource_start(pdev, 0), |
a8aea849 | 4319 | netdev->dev_addr); |
1738cd3e NB |
4320 | |
4321 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | |
4322 | ||
4323 | adapters_found++; | |
4324 | ||
4325 | return 0; | |
4326 | ||
4327 | err_rss: | |
4328 | ena_com_delete_debug_area(ena_dev); | |
4329 | ena_com_rss_destroy(ena_dev); | |
4330 | err_free_msix: | |
e2eed0e3 | 4331 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); |
58a54b9c AK |
4332 | /* stop submitting admin commands on a device that was reset */ |
4333 | ena_com_set_admin_running_state(ena_dev, false); | |
1738cd3e | 4334 | ena_free_mgmnt_irq(adapter); |
06443684 | 4335 | ena_disable_msix(adapter); |
1738cd3e | 4336 | err_worker_destroy: |
1738cd3e | 4337 | del_timer(&adapter->timer_service); |
1738cd3e NB |
4338 | err_netdev_destroy: |
4339 | free_netdev(netdev); | |
4340 | err_device_destroy: | |
4341 | ena_com_delete_host_info(ena_dev); | |
4342 | ena_com_admin_destroy(ena_dev); | |
4343 | err_free_region: | |
4344 | ena_release_bars(ena_dev, pdev); | |
4345 | err_free_ena_dev: | |
1738cd3e NB |
4346 | vfree(ena_dev); |
4347 | err_disable_device: | |
4348 | pci_disable_device(pdev); | |
4349 | return rc; | |
4350 | } | |
4351 | ||
1738cd3e NB |
4352 | /*****************************************************************************/ |
4353 | ||
428c4913 | 4354 | /* __ena_shutoff - Helper used in both PCI remove/shutdown routines |
1738cd3e | 4355 | * @pdev: PCI device information struct |
428c4913 | 4356 | * @shutdown: Is it a shutdown operation? If false, means it is a removal |
1738cd3e | 4357 | * |
428c4913 GP |
4358 | * __ena_shutoff is a helper routine that does the real work on shutdown and |
4359 | * removal paths; the difference between those paths is whether to
4360 | * detach or unregister the netdevice.
1738cd3e | 4361 | */ |
428c4913 | 4362 | static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) |
1738cd3e NB |
4363 | { |
4364 | struct ena_adapter *adapter = pci_get_drvdata(pdev); | |
4365 | struct ena_com_dev *ena_dev; | |
4366 | struct net_device *netdev; | |
4367 | ||
1738cd3e NB |
4368 | ena_dev = adapter->ena_dev; |
4369 | netdev = adapter->netdev; | |
4370 | ||
4371 | #ifdef CONFIG_RFS_ACCEL | |
4372 | if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { | |
4373 | free_irq_cpu_rmap(netdev->rx_cpu_rmap); | |
4374 | netdev->rx_cpu_rmap = NULL; | |
4375 | } | |
4376 | #endif /* CONFIG_RFS_ACCEL */ | |
1738cd3e | 4377 | |
63d4a4c1 SA |
4378 | /* Make sure timer and reset routine won't be called after |
4379 | * freeing device resources. | |
4380 | */ | |
4381 | del_timer_sync(&adapter->timer_service); | |
1738cd3e NB |
4382 | cancel_work_sync(&adapter->reset_task); |
4383 | ||
428c4913 | 4384 | rtnl_lock(); /* lock released inside the below if-else block */ |
c1c0e40b | 4385 | adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; |
944b28aa | 4386 | ena_destroy_device(adapter, true); |
428c4913 GP |
4387 | if (shutdown) { |
4388 | netif_device_detach(netdev); | |
4389 | dev_close(netdev); | |
4390 | rtnl_unlock(); | |
4391 | } else { | |
4392 | rtnl_unlock(); | |
4393 | unregister_netdev(netdev); | |
4394 | free_netdev(netdev); | |
4395 | } | |
1738cd3e | 4396 | |
1738cd3e NB |
4397 | ena_com_rss_destroy(ena_dev); |
4398 | ||
4399 | ena_com_delete_debug_area(ena_dev); | |
4400 | ||
4401 | ena_com_delete_host_info(ena_dev); | |
4402 | ||
4403 | ena_release_bars(ena_dev, pdev); | |
4404 | ||
1738cd3e NB |
4405 | pci_disable_device(pdev); |
4406 | ||
1738cd3e NB |
4407 | vfree(ena_dev); |
4408 | } | |
4409 | ||
428c4913 GP |
4410 | /* ena_remove - Device Removal Routine |
4411 | * @pdev: PCI device information struct | |
4412 | * | |
4413 | * ena_remove is called by the PCI subsystem to alert the driver | |
4414 | * that it should release a PCI device. | |
4415 | */ | |
4416 | ||
4417 | static void ena_remove(struct pci_dev *pdev) | |
4418 | { | |
4419 | __ena_shutoff(pdev, false); | |
4420 | } | |
4421 | ||
4422 | /* ena_shutdown - Device Shutdown Routine | |
4423 | * @pdev: PCI device information struct | |
4424 | * | |
4425 | * ena_shutdown is called by the PCI subsystem to alert the driver that | |
4426 | * a shutdown/reboot (or kexec) is happening and the device must be disabled. | |
4427 | */ | |
4428 | ||
4429 | static void ena_shutdown(struct pci_dev *pdev) | |
4430 | { | |
4431 | __ena_shutoff(pdev, true); | |
4432 | } | |
4433 | ||
8c5c7abd | 4434 | /* ena_suspend - PM suspend callback |
817a89ae | 4435 | * @dev_d: Device information struct |
8c5c7abd | 4436 | */ |
817a89ae | 4437 | static int __maybe_unused ena_suspend(struct device *dev_d) |
8c5c7abd | 4438 | { |
817a89ae | 4439 | struct pci_dev *pdev = to_pci_dev(dev_d); |
8c5c7abd NB |
4440 | struct ena_adapter *adapter = pci_get_drvdata(pdev); |
4441 | ||
4442 | u64_stats_update_begin(&adapter->syncp); | |
4443 | adapter->dev_stats.suspend++; | |
4444 | u64_stats_update_end(&adapter->syncp); | |
4445 | ||
4446 | rtnl_lock(); | |
4447 | if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { | |
4448 | dev_err(&pdev->dev, | |
bf2746e8 | 4449 | "Ignoring device reset request as the device is being suspended\n"); |
8c5c7abd NB |
4450 | clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); |
4451 | } | |
cfa324a5 | 4452 | ena_destroy_device(adapter, true); |
8c5c7abd NB |
4453 | rtnl_unlock(); |
4454 | return 0; | |
4455 | } | |
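/* The u64_stats_update_begin()/end() pair above takes the adapter's
 * seqcount so the 64-bit counters cannot be read torn on 32-bit machines.
 * A minimal reader-side sketch using the standard u64_stats_fetch API
 * (ena_read_suspend_count is hypothetical, not a function in this file):
 */
static u64 ena_read_suspend_count(struct ena_adapter *adapter)
{
	unsigned int start;
	u64 cnt;

	do {
		start = u64_stats_fetch_begin(&adapter->syncp);
		cnt = adapter->dev_stats.suspend;
	} while (u64_stats_fetch_retry(&adapter->syncp, start));

	return cnt;
}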
4456 | ||
4457 | /* ena_resume - PM resume callback | |
817a89ae | 4458 | * @dev_d: Device information struct |
8c5c7abd | 4459 | */ |
817a89ae | 4460 | static int __maybe_unused ena_resume(struct device *dev_d) |
8c5c7abd | 4461 | { |
817a89ae | 4462 | struct ena_adapter *adapter = dev_get_drvdata(dev_d); |
8c5c7abd NB |
4463 | int rc; |
4464 | ||
4465 | u64_stats_update_begin(&adapter->syncp); | |
4466 | adapter->dev_stats.resume++; | |
4467 | u64_stats_update_end(&adapter->syncp); | |
4468 | ||
4469 | rtnl_lock(); | |
4470 | rc = ena_restore_device(adapter); | |
4471 | rtnl_unlock(); | |
4472 | return rc; | |
4473 | } | |
817a89ae VG |
4474 | |
4475 | static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume); | |
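/* For reference, SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) builds a
 * struct dev_pm_ops that routes every system-sleep transition to the same
 * pair of callbacks (when CONFIG_PM_SLEEP is set). A rough sketch of what
 * the line above yields; ena_pm_ops_sketch is illustrative, the real
 * expansion lives in <linux/pm.h>:
 */
static const struct dev_pm_ops ena_pm_ops_sketch = {
	.suspend  = ena_suspend,	/* system suspend */
	.resume   = ena_resume,		/* system resume */
	.freeze   = ena_suspend,	/* hibernation: freeze */
	.thaw     = ena_resume,		/* hibernation: thaw */
	.poweroff = ena_suspend,	/* hibernation: power off */
	.restore  = ena_resume,		/* hibernation: restore image */
};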
8c5c7abd | 4476 | |
1738cd3e NB |
4477 | static struct pci_driver ena_pci_driver = { |
4478 | .name = DRV_MODULE_NAME, | |
4479 | .id_table = ena_pci_tbl, | |
4480 | .probe = ena_probe, | |
4481 | .remove = ena_remove, | |
428c4913 | 4482 | .shutdown = ena_shutdown, |
817a89ae | 4483 | .driver.pm = &ena_pm_ops, |
115ddc49 | 4484 | .sriov_configure = pci_sriov_configure_simple, |
1738cd3e NB |
4485 | }; |
4486 | ||
4487 | static int __init ena_init(void) | |
4488 | { | |
1738cd3e NB |
4489 | ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); |
4490 | if (!ena_wq) { | |
4491 | pr_err("Failed to create workqueue\n"); | |
4492 | return -ENOMEM; | |
4493 | } | |
4494 | ||
4495 | return pci_register_driver(&ena_pci_driver); | |
4496 | } | |
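/* ena_wq is the single-threaded queue onto which the driver defers work
 * such as the reset task cancelled in __ena_shutoff() above. A minimal,
 * hypothetical caller (ena_schedule_reset is illustrative, not a function
 * in this file):
 */
static void ena_schedule_reset(struct ena_adapter *adapter)
{
	/* runs adapter->reset_task on ena_wq in process context */
	queue_work(ena_wq, &adapter->reset_task);
}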
4497 | ||
4498 | static void __exit ena_cleanup(void) | |
4499 | { | |
4500 | pci_unregister_driver(&ena_pci_driver); | |
4501 | ||
4502 | if (ena_wq) { | |
4503 | destroy_workqueue(ena_wq); | |
4504 | ena_wq = NULL; | |
4505 | } | |
4506 | } | |
4507 | ||
4508 | /****************************************************************************** | |
4509 | ******************************** AENQ Handlers ******************************* | |
4510 | *****************************************************************************/ | |
4511 | /* ena_update_on_link_change: | |
4512 | * Notify the network interface about the change in link status | |
4513 | */ | |
4514 | static void ena_update_on_link_change(void *adapter_data, | |
4515 | struct ena_admin_aenq_entry *aenq_e) | |
4516 | { | |
4517 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
4518 | struct ena_admin_aenq_link_change_desc *aenq_desc = | |
4519 | (struct ena_admin_aenq_link_change_desc *)aenq_e; | |
4520 | int status = aenq_desc->flags & | |
4521 | ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; | |
4522 | ||
4523 | if (status) { | |
f0525298 | 4524 | netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); |
1738cd3e | 4525 | set_bit(ENA_FLAG_LINK_UP, &adapter->flags); |
d18e4f68 NB |
4526 | if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) |
4527 | netif_carrier_on(adapter->netdev); | |
1738cd3e NB |
4528 | } else { |
4529 | clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); | |
4530 | netif_carrier_off(adapter->netdev); | |
4531 | } | |
4532 | } | |
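/* The handler keys entirely off the link-status flag bit of the AENQ
 * descriptor. Assuming the mask is bit 0 (its value in the companion
 * ena_admin_defs.h, not shown in this file), a short worked example:
 *
 *	flags = 0x3:  flags & 0x1 = 1  ->  link up, carrier on
 *	              (deferred while ENA_FLAG_ONGOING_RESET is set)
 *	flags = 0x2:  flags & 0x1 = 0  ->  link down, carrier off
 */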
4533 | ||
4534 | static void ena_keep_alive_wd(void *adapter_data, | |
4535 | struct ena_admin_aenq_entry *aenq_e) | |
4536 | { | |
4537 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
11a9a460 NB |
4538 | struct ena_admin_aenq_keep_alive_desc *desc; |
4539 | u64 rx_drops; | |
5c665f8c | 4540 | u64 tx_drops; |
1738cd3e | 4541 | |
11a9a460 | 4542 | desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; |
1738cd3e | 4543 | adapter->last_keep_alive_jiffies = jiffies; |
11a9a460 NB |
4544 | |
4545 | rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; | |
5c665f8c | 4546 | tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; |
11a9a460 NB |
4547 | |
4548 | u64_stats_update_begin(&adapter->syncp); | |
ccd143e5 SA |
4549 | /* These stats are accumulated by the device, so the counters indicate |
4550 | * all drops since last reset. | |
4551 | */ | |
11a9a460 | 4552 | adapter->dev_stats.rx_drops = rx_drops; |
5c665f8c | 4553 | adapter->dev_stats.tx_drops = tx_drops; |
11a9a460 | 4554 | u64_stats_update_end(&adapter->syncp); |
1738cd3e NB |
4555 | } |
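/* Each drop counter arrives split across two 32-bit descriptor words; the
 * shift-and-or above reassembles it. A quick worked example:
 *
 *	rx_drops_high = 0x1, rx_drops_low = 0x2a
 *	rx_drops = ((u64)0x1 << 32) | 0x2a = 0x1_0000_002a = 4294967338
 */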
4556 | ||
4557 | static void ena_notification(void *adapter_data, | |
4558 | struct ena_admin_aenq_entry *aenq_e) | |
4559 | { | |
4560 | struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; | |
82ef30f1 | 4561 | struct ena_admin_ena_hw_hints *hints; |
1738cd3e NB |
4562 | |
4563 | WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, | |
4564 | "Invalid group(%x) expected %x\n", | |
4565 | aenq_e->aenq_common_desc.group, | |
4566 | ENA_ADMIN_NOTIFICATION); | |
4567 | ||
bf2746e8 | 4568 | switch (aenq_e->aenq_common_desc.syndrome) { |
82ef30f1 NB |
4569 | case ENA_ADMIN_UPDATE_HINTS: |
4570 | hints = (struct ena_admin_ena_hw_hints *) | |
4571 | (&aenq_e->inline_data_w4); | |
4572 | ena_update_hints(adapter, hints); | |
4573 | break; | |
1738cd3e NB |
4574 | default: |
4575 | netif_err(adapter, drv, adapter->netdev, | |
4576 | "Invalid aenq notification link state %d\n", | |
bf2746e8 | 4577 | aenq_e->aenq_common_desc.syndrome); |
1738cd3e NB |
4578 | } |
4579 | } | |
4580 | ||
4581 | /* This handler will be called for an unknown event group or unimplemented handlers */ | |
4582 | static void unimplemented_aenq_handler(void *data, | |
4583 | struct ena_admin_aenq_entry *aenq_e) | |
4584 | { | |
4585 | struct ena_adapter *adapter = (struct ena_adapter *)data; | |
4586 | ||
4587 | netif_err(adapter, drv, adapter->netdev, | |
4588 | "Unknown event was received or event with unimplemented handler\n"); | |
4589 | } | |
4590 | ||
4591 | static struct ena_aenq_handlers aenq_handlers = { | |
4592 | .handlers = { | |
4593 | [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, | |
4594 | [ENA_ADMIN_NOTIFICATION] = ena_notification, | |
4595 | [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, | |
4596 | }, | |
4597 | .unimplemented_handler = unimplemented_aenq_handler | |
4598 | }; | |
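/* Sketch of the lookup the ena_com layer performs against this table when
 * an AENQ entry arrives (illustrative only; the real dispatch routine
 * lives in ena_com.c, and ena_aenq_handler/ENA_MAX_HANDLERS come from
 * ena_com.h):
 */
static ena_aenq_handler ena_get_aenq_handler(struct ena_aenq_handlers *handlers,
					     u16 group)
{
	if (group < ENA_MAX_HANDLERS && handlers->handlers[group])
		return handlers->handlers[group];

	/* e.g. unimplemented_aenq_handler above */
	return handlers->unimplemented_handler;
}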
4599 | ||
4600 | module_init(ena_init); | |
4601 | module_exit(ena_cleanup); |