/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"
/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_REGS_ADMIN_INTR_MASK 1
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
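
/* Split a DMA address into the low/high halves of an ENA memory
 * descriptor. The address is validated against the DMA address width
 * previously read from the device; addresses that need more bits than
 * the device advertised are rejected.
 */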
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

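/* Allocate the asynchronous event notification queue and publish its base
 * address, depth and entry size to the device registers. aenq->head is
 * initialized to the queue depth; the head doorbell itself is written
 * later by ena_com_admin_aenq_enable().
 */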
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);
	if (!aenq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}
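
/* Look up the completion context of a given command id. When @capture is
 * true the context is also marked occupied and counted as an outstanding
 * command; submission uses capture == true, completion handling uses
 * capture == false.
 */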
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
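
/* Fill and queue one admin command. Called with the admin queue lock
 * held: the caller owns sq.tail, sq.phase and curr_cmd_id. The phase bit
 * in the descriptor flags tells the device which entries are new, and
 * the doorbell write publishes the updated tail to the device.
 */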
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
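
/* Allocate the descriptor area of an IO submission queue. Host-memory
 * queues use coherent DMA memory; device-memory (LLQ) queues only need a
 * host-side staging buffer. Allocation is tried first on the requested
 * NUMA node and falls back to the default node on failure.
 */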
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
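
/* Reap all admin completions that carry the current phase bit. The phase
 * toggles on every wrap-around of the queue; this is how new entries are
 * told apart from stale ones without a head pointer from the device.
 */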
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}
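
/* Poll the admin completion queue for the given command until it leaves
 * the SUBMITTED state or ADMIN_CMD_TIMEOUT_US expires. On timeout the
 * admin queue is marked as not running, which fails subsequent commands.
 */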
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive an MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting a write and
 * waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags;
	int i;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		pr_err("reg read timed out. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for a completion.
 * Polling mode - poll until the completion is available.
 * Async mode - wait on the wait event until the completion is ready
 * (or the timeout expires).
 * In async mode the IRQ handler is expected to call
 * ena_com_handle_admin_completion to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}
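
/* Allocate the RSS indirection tables after validating the requested
 * log-size against the device limits: a DMA table holding the
 * device-side entries and a host-side shadow table of queue ids.
 */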
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
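
/* Issue the CREATE_SQ admin command for an IO submission queue that was
 * already initialized by ena_com_init_io_sq(). On success the returned
 * doorbell offset (and, for LLQ, the header/descriptor offsets) are
 * turned into pointers into the register/memory BARs.
 */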
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return 0;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	size_t i;
	u16 qid;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

*ena_dev
)
1082 size
= sizeof(struct ena_intr_moder_entry
) * ENA_INTR_MAX_NUM_OF_LEVELS
;
1084 ena_dev
->intr_moder_tbl
=
1085 devm_kzalloc(ena_dev
->dmadev
, size
, GFP_KERNEL
);
1086 if (!ena_dev
->intr_moder_tbl
)
1089 ena_com_config_default_interrupt_moderation_table(ena_dev
);
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
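
/* Submit one admin command and block until its completion is processed,
 * either by polling or by the MSI-X driven completion handler. This is
 * the single entry point used by the feature/queue admin helpers below.
 */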
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return 0;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
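
/* Bring up the admin queue: verify the device is ready, allocate the
 * completion contexts, SQ and CQ, program their base addresses and caps
 * registers, and finally initialize the AENQ. Any failure tears down
 * whatever was already created via ena_com_admin_destroy().
 */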
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
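
/* Create one IO queue pair (SQ + CQ) described by @ctx: initialize the
 * host-side structures, then create the CQ before the SQ, since the SQ
 * creation command must carry the index of its completion queue.
 */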
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * Handles incoming AENQ events: pops events from the queue and invokes
 * the handler that matches each event's group.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
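
/* Reset the device: set the reset bit, wait for the device to assert the
 * RESET_IN_PROGRESS status bit, clear the bit and wait for the indication
 * to clear. The timeout is taken from the caps register. The MMIO
 * read-response address is re-written since the reset clears it.
 */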
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func &
	      (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
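
/* Configure the RSS hash function and (for Toeplitz) its key. The chosen
 * function is validated against the device's supported-function bitmap;
 * if programming the device fails, the previous configuration is read
 * back so the host state stays consistent.
 */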
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the requested fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
		return -EOPNOTSUPP;
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

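/*
 * Usage sketch (illustrative only): selecting a 4-tuple hash for TCP/IPv4.
 * "my_ena_dev" is a hypothetical device handle; the field masks are the same
 * ones ena_com_set_default_hash_ctrl() programs above.
 *
 *	rc = ena_com_fill_hash_ctrl(my_ena_dev, ENA_ADMIN_RSS_TCP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 */
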
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

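/*
 * Usage sketch (illustrative only): spreading flows round-robin across
 * "nr_queues" RX queues, then pushing the table to the device with
 * ena_com_indirect_table_set() below. "my_ena_dev" and "nr_queues" are
 * hypothetical; the 1 << tbl_log_size table size mirrors the bounds check
 * above.
 *
 *	for (i = 0; i < (1 << my_ena_dev->rss.tbl_log_size); i++)
 *		ena_com_indirect_table_fill_entry(my_ena_dev, i,
 *						  i % nr_queues);
 *	rc = ena_com_indirect_table_set(my_ena_dev);
 */
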
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

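/*
 * Usage sketch (illustrative only): reading the indirection table back,
 * e.g. for an ethtool-style RSS query. The buffer size assumes a
 * tbl_log_size of 7 (128 entries); size it from the real log size in
 * practice.
 *
 *	u32 table[128];
 *
 *	rc = ena_com_indirect_table_get(my_ena_dev, table);
 */
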
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

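/*
 * Usage sketch (illustrative only): typical init/teardown pairing. The log
 * size of 7 (a 128-entry indirection table) and "my_ena_dev" are assumptions
 * for the example; on failure the function unwinds its own allocations.
 *
 *	rc = ena_com_rss_init(my_ena_dev, 7);
 *	if (rc)
 *		return rc;
 *	...
 *	ena_com_rss_destroy(my_ena_dev);
 */
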
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

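/*
 * Usage sketch (illustrative only): host info and the debug area are
 * allocated first, then pushed to the device in one admin command by
 * ena_com_set_host_attributes() below. The 32-byte debug area size and
 * "my_ena_dev" are assumptions for the example.
 *
 *	rc = ena_com_allocate_host_info(my_ena_dev);
 *	if (!rc)
 *		rc = ena_com_allocate_debug_area(my_ena_dev, 32);
 *	if (!rc)
 *		rc = ena_com_set_host_attributes(my_ena_dev);
 */
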
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

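/*
 * Usage sketch (illustrative only): programming static (nonadaptive)
 * coalescing, e.g. from an ethtool coalesce handler. The 64 usec value is
 * an arbitrary example; both helpers divide it by the device's reported
 * intr_delay_resolution.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_tx(my_ena_dev, 64);
 *	if (!rc)
 *		rc = ena_com_update_nonadaptive_moderation_interval_rx(my_ena_dev,
 *								       64);
 */
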
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

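/*
 * Usage sketch (illustrative only): overriding one level of the adaptive
 * moderation table. The values are arbitrary examples; the interval is in
 * usecs and is rescaled by intr_delay_resolution as shown above.
 *
 *	struct ena_intr_moder_entry entry = {
 *		.intr_moder_interval = 64,
 *		.pkts_per_interval = 48,
 *		.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED,
 *	};
 *
 *	ena_com_init_intr_moderation_entry(my_ena_dev, ENA_INTR_MODER_MID,
 *					   &entry);
 */
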
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval = intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}