/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
			 admin_queue->sq.tail, admin_queue->sq.head,
			 admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
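/*
 * An illustrative walk-through of the producer path above, assuming
 * q_depth = 32 (a power of two, so queue_size_mask = 31):
 *
 *	sq.tail = 31 -> tail_masked = 31, command copied to slot 31
 *	sq.tail++   -> 32; (32 & 31) == 0, so sq.phase is inverted
 *	sq.tail = 32 -> tail_masked = 0, the next command reuses slot 0
 *	               and carries the new phase bit in its flags
 *
 * The phase bit lets the device distinguish freshly written descriptors
 * from stale ones left over from the previous lap around the ring.
 */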
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
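/*
 * A worked example (illustrative only) of the consumer loop above, with
 * q_depth = 32 and an expected phase of 1:
 *
 *	head_masked = 30 -> entry[30] carries phase 1 -> consume
 *	head_masked = 31 -> entry[31] carries phase 1 -> consume,
 *	                    then wrap: head_masked = 0, phase flips to 0
 *	head_masked = 0  -> entry[0] still carries phase 1 from the
 *	                    previous lap (stale) -> loop exits
 *
 * cq.head then advances by comp_num and sq.head advances in lock-step,
 * freeing submission-queue slots for new commands.
 */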
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EPERM;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive any MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags;
	int i;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * there
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}

err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}
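/*
 * The "readless" (posted-write) register read above, summarized:
 *
 *	1. The host bumps seq_num and poisons read_resp->req_id with
 *	   seq_num + 0xDEAD so a stale response can never match.
 *	2. The host posts {reg_off, req_id = seq_num} to
 *	   ENA_REGS_MMIO_REG_READ_OFF.
 *	3. The device DMA-writes {req_id, reg_off, reg_val} into the
 *	   pre-registered read_resp buffer.
 *	4. The host polls read_resp->req_id until it equals seq_num and
 *	   then returns read_resp->reg_val.
 *
 * This avoids issuing a PCI read to the device; on any timeout or offset
 * mismatch the sentinel ENA_MMIO_READ_TIMEOUT is returned instead.
 */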
/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EPERM;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
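/*
 * Usage sketch (illustrative only; ena_com_get_dev_attr_feat() below
 * follows exactly this pattern):
 *
 *	struct ena_admin_get_feat_resp get_resp;
 *	int rc;
 *
 *	rc = ena_com_get_feature(ena_dev, &get_resp,
 *				 ENA_ADMIN_DEVICE_ATTRIBUTES);
 *	if (!rc)
 *		ena_dev->supported_features =
 *			get_resp.u.dev_attr.supported_features;
 */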
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	size_t i;
	u16 qid;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
*ena_dev
)
1082 size
= sizeof(struct ena_intr_moder_entry
) * ENA_INTR_MAX_NUM_OF_LEVELS
;
1084 ena_dev
->intr_moder_tbl
=
1085 devm_kzalloc(ena_dev
->dmadev
, size
, GFP_KERNEL
);
1086 if (!ena_dev
->intr_moder_tbl
)
1089 ena_com_config_default_interrupt_moderation_table(ena_dev
);
1094 static void ena_com_update_intr_delay_resolution(struct ena_com_dev
*ena_dev
,
1095 u16 intr_delay_resolution
)
1097 struct ena_intr_moder_entry
*intr_moder_tbl
= ena_dev
->intr_moder_tbl
;
1100 if (!intr_delay_resolution
) {
1101 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1102 intr_delay_resolution
= 1;
1104 ena_dev
->intr_delay_resolution
= intr_delay_resolution
;
1107 for (i
= 0; i
< ENA_INTR_MAX_NUM_OF_LEVELS
; i
++)
1108 intr_moder_tbl
[i
].intr_moder_interval
/= intr_delay_resolution
;
1111 ena_dev
->intr_moder_tx_interval
/= intr_delay_resolution
;
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
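/*
 * Usage sketch (illustrative only): every admin command in this file is
 * funneled through this helper by casting its command-specific
 * descriptors to the generic admin-queue entry types, e.g.:
 *
 *	struct ena_admin_set_feat_cmd cmd;
 *	struct ena_admin_set_feat_resp resp;
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	... fill cmd ...
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */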
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
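/*
 * Worked example (illustrative; assumes the DMA width field occupies
 * bits 8..15 of the CAPS register, i.e. mask 0xff00 and shift 8):
 *
 *	caps = 0x2820 -> width = (0x2820 & 0xff00) >> 8 = 0x28 = 40
 *
 * so ena_com_mem_addr_set() will afterwards reject any DMA address that
 * does not fit in 40 bits.
 */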
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the versions the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
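/*
 * Bring-up sketch (illustrative only; the exact order is the caller's
 * responsibility): the readless-read machinery must exist before any of
 * the register reads performed here, e.g.:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (!rc)
 *		rc = ena_com_validate_version(ena_dev);
 *	if (!rc)
 *		rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	if (!rc)
 *		ena_com_set_admin_polling_mode(ena_dev, false);
 */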
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
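/*
 * Usage sketch (illustrative only; the queue size and vector are made
 * up): creating a host-memory Rx queue. Every field below is consumed by
 * this function or by the init helpers above:
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= qid,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_RX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.queue_size	= 1024,
 *		.msix_vector	= msix_vector,
 *		.numa_node	= dev_to_node(ena_dev->dmadev),
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */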
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EPERM;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EPERM;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
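/*
 * Usage sketch (illustrative only): programming a 40-byte Toeplitz key;
 * key_len must be a multiple of 4 per the check above:
 *
 *	static const u8 toeplitz_key[40] = { 0x6d, 0x5a, ... };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					toeplitz_key, sizeof(toeplitz_key),
 *					0x12345678);
 *
 * On failure the previous device state is re-read so the cached copy
 * stays consistent.
 */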
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

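/* Pick a sane default field selection for every supported protocol: the
 * full 4-tuple for TCP/UDP over IPv4/IPv6, the IP source/destination pair
 * for plain and fragmented IP, and the L2 address pair for non-IP traffic.
 * The selection is validated against the device's supported_fields mask
 * before it is pushed with ena_com_set_hash_ctrl().
 */
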
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EPERM;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

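/* Push the host indirection table to the device: the host-order table is
 * first converted to device queue-id format, then handed over as an
 * indirect control buffer of 2^tbl_log_size entries.
 */
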
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EPERM;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

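/* A minimal configuration sketch (illustrative only; tbl_entries and
 * num_queues are assumed caller variables): spread the indirection table
 * round-robin across the RX queues, then push it to the device.
 */
#if 0
	for (i = 0; i < tbl_entries; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       i % num_queues);
		if (unlikely(rc))
			return rc;
	}

	rc = ena_com_indirect_table_set(ena_dev);
#endif
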
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

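/* Note: ena_com_rss_init() unwinds its partial allocations on failure, so
 * callers only need to call ena_com_rss_destroy() after a successful init.
 */
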
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

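/* Host attribute lifecycle sketch (illustrative only; debug_area_size and
 * the error labels are assumptions): allocate both buffers, publish their
 * DMA addresses with ena_com_set_host_attributes(), and free them on
 * teardown.
 */
#if 0
	rc = ena_com_allocate_host_info(ena_dev);
	if (unlikely(rc))
		goto err;

	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
	if (unlikely(rc))
		goto err_free_host_info;

	rc = ena_com_set_host_attributes(ena_dev);
	if (unlikely(rc))
		goto err_free_debug_area;

	/* ... normal operation ... */

	ena_com_delete_debug_area(ena_dev);
	ena_com_delete_host_info(ena_dev);
#endif
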
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

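/* The device timer ticks in units of intr_delay_resolution, so the intervals
 * above and below are stored pre-divided; e.g. with a 4 usec resolution, a
 * requested 64 usecs is stored as 16 device units.
 */
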
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use the LOWEST entry of the moderation table to store the
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EPERM) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by the device, set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

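/* The default-table helper below only fills in values; its !intr_moder_tbl
 * guard makes it a no-op until ena_com_init_interrupt_moderation() has
 * allocated the table.
 */
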
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval = intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

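/* A minimal adjustment sketch (illustrative only, not a caller from this
 * file): read one level back, scale its interval, and write it back; the
 * two helpers above convert between usecs and device units via
 * intr_delay_resolution.
 */
#if 0
	struct ena_intr_moder_entry entry;

	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
	entry.intr_moder_interval *= 2;
	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
#endif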