/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))
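
/* Example: a dma_addr_t is programmed into the device as two 32-bit halves
 * using the helpers above. A minimal sketch, mirroring the AENQ base
 * register writes done later in this file:
 *
 *	writel(ENA_DMA_ADDR_TO_UINT32_LOW(dma_addr),
 *	       ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
 *	writel(ENA_DMA_ADDR_TO_UINT32_HIGH(dma_addr),
 *	       ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
 */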

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
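
/* The admin SQ is a power-of-two ring, so wrap-around is detected with a
 * mask rather than a modulo, and the producer phase flips on every wrap.
 * A minimal sketch of the invariant maintained above (q_depth is assumed
 * to be a power of two, which ENA_ADMIN_QUEUE_DEPTH guarantees):
 *
 *	u16 mask = q_depth - 1;
 *
 *	tail++;
 *	if ((tail & mask) == 0)
 *		phase = !phase;
 */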

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads the hardware device register through posting writes
 * and waiting for response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * there
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}
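
/* Callers must treat ENA_MMIO_READ_TIMEOUT as an in-band error value, not
 * as register contents. A minimal usage sketch, matching the pattern used
 * by the callers below:
 *
 *	u32 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 *
 *	if (unlikely(val == ENA_MMIO_READ_TIMEOUT))
 *		return -ETIME;
 */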

/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
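
/* Which of the two paths runs is selected by the admin queue's polling
 * flag (see ena_com_set_admin_polling_mode() below). A hedged sketch of
 * how a driver is expected to use it around interrupt setup:
 *
 *	ena_com_set_admin_polling_mode(ena_dev, true);
 *	... issue admin commands before MSI-X vectors exist ...
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 */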

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
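
/* Typical call pattern for this API, as used by the feature helpers later
 * in this file: build the command on the stack, then pass both the command
 * and a response buffer, cast to the generic admin entry types:
 *
 *	struct ena_admin_set_feat_cmd cmd;
 *	struct ena_admin_set_feat_resp resp;
 *	int ret;
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */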

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
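
/* The returned width is meant to be fed into the DMA API by the caller.
 * A hedged sketch of the caller's side (pdev stands for the driver's
 * struct pci_dev and is not defined in this file):
 *
 *	int width = ena_com_get_dma_width(ena_dev);
 *
 *	if (width < 0)
 *		return width;
 *	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(width));
 */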

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as high as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
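
/* A sketch of how a driver fills struct ena_com_create_io_ctx before the
 * call above (the right-hand values are illustrative placeholders):
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid = qid,
 *		.queue_size = queue_size,
 *		.msix_vector = msix_vector,
 *		.numa_node = numa_node,
 *	};
 *
 *	ret = ena_com_create_io_queue(ena_dev, &ctx);
 */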

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command. So in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
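
/* AENQ dispatch depends on the struct ena_aenq_handlers the driver passed
 * to ena_com_admin_init(). A hedged sketch (the handler names are
 * placeholders, not symbols from this file):
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = my_link_change_handler,
 *		},
 *		.unimplemented_handler = my_unimplemented_handler,
 *	};
 *
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 */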

int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
			ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		ret = -EINVAL;
	}

	return ret;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
		return -EOPNOTSUPP;
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
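/* Usage sketch (illustrative): hashing TCP/IPv4 flows on the classic
 * 4-tuple means selecting source/destination IP plus source/destination
 * port:
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 *
 * The call fails if the device does not support every requested field
 * for that protocol, and on a failed device update the previous hash
 * control is re-read from the device.
 */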
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
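/* Usage sketch (illustrative): a driver typically spreads the table
 * round-robin across its RX queues before pushing it to the device;
 * "num_queues" below is a hypothetical caller-side queue count:
 *
 *	for (i = 0; i < (1 << ena_dev->rss.tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *	rc = ena_com_indirect_table_set(ena_dev);
 */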
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}
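/* Note: ena_com_rss_init() acquires its three resources in order
 * (indirection table, hash key, hash ctrl) and the error path unwinds
 * only what was already allocated, so after a successful call the only
 * required cleanup is the matching ena_com_rss_destroy() below.
 */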
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
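/* Usage sketch (illustrative): the host info and debug area buffers
 * must be allocated before this command publishes their addresses to
 * the device ("debug_area_size" is caller-chosen, error handling elided):
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	rc = ena_com_allocate_debug_area(ena_dev, debug_area_size);
 *	... fill host_attr->host_info ...
 *	rc = ena_com_set_host_attributes(ena_dev);
 */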
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}
int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}
int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
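/* Worked example: with intr_delay_resolution == 4 (one device unit per
 * 4 usec), a request for 64 usec of RX coalescing stores 64 / 4 = 16
 * device units in the LOWEST table entry; the matching getter later
 * returns those raw units, not microseconds.
 */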
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);

	return rc;
}
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
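/* Note the symmetry with ena_com_init_intr_moderation_entry() above:
 * intervals are divided by intr_delay_resolution on the way into the
 * table and multiplied by it on the way out, so callers always work in
 * microseconds while the table holds device units.
 */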
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval