/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/
/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (1000000)

#define ENA_ASYNC_QUEUE_DEPTH 4
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
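
/*
 * Worked example (editor's note, not in the original source): for a DMA
 * address of 0x0000001234567890, ENA_DMA_ADDR_TO_UINT32_LOW() yields
 * 0x34567890 and ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x00000012. The two
 * halves are written to paired LO/HI device registers, as done below for
 * the admin queue, admin completion queue and AENQ base addresses.
 */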
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};
struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = (u32)addr;
	ena_addr->mem_addr_high = (u64)addr >> 32;

	return 0;
}
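
/*
 * Usage sketch (hypothetical caller, for illustration only): translating a
 * dma_addr_t obtained from a coherent allocation into the device's split
 * low/high representation before placing it in an admin command:
 *
 *	struct ena_common_mem_addr dev_addr;
 *	int rc = ena_com_mem_addr_set(ena_dev, &dev_addr, dma_handle);
 *	if (rc)
 *		return rc;	// address wider than ena_dev->dma_addr_bits
 */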
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
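
/*
 * Editor's note on the AENQ_CAPS encoding above: the register packs the
 * queue depth in the low bits and the entry size (in bytes) in the field
 * selected by ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT/_MASK. Assuming a
 * 64-byte ena_admin_aenq_entry, the depth of 4 used here results in a
 * value of (4 | (64 << ENTRY_SIZE_SHIFT)), each field masked to its own
 * bit range.
 */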
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
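
/*
 * Editor's note: get_comp_ctxt() and comp_ctxt_release() bracket the life
 * of one in-flight admin command. capture=true marks the slot occupied and
 * bumps outstanding_cmds on submission; the completion path looks the same
 * slot up again with capture=false, and comp_ctxt_release() returns it to
 * the pool once the caller has consumed the status.
 */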
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = admin_queue->sq.tail - admin_queue->sq.head;
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
			 admin_queue->sq.tail, admin_queue->sq.head,
			 admin_queue->q_depth);
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
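
/*
 * Editor's note on the phase-bit protocol used above: the queue never wraps
 * its tail/head counters, only their masked values. Each submitted
 * descriptor carries the producer's current phase bit, and the phase is
 * inverted every time the masked tail returns to 0. A consumer therefore
 * recognizes new entries by comparing the descriptor's phase bit with its
 * own expected phase rather than comparing indices. For a depth-32 queue:
 * tail 0..31 submits with phase 1, tail 32..63 with phase 0, and so on.
 */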
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((cqe->acq_common_descriptor.flags &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	u32 start_time;
	int ret;

	start_time = ((u32)jiffies_to_usecs(jiffies));

	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
		    ADMIN_CMD_TIMEOUT_US) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the driver didn't receive the MSI-X
	 *    interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret;
	unsigned long flags;
	int i;

	might_sleep();

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id get updated before the hw can write
	 * there
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
		if (read_resp->req_id == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}
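
/*
 * Editor's sketch of the readless-read handshake implemented above
 * (terminology mine): the driver owns a DMA-able response buffer whose
 * address is programmed into ENA_REGS_MMIO_RESP_LO/HI_OFF. A "read" is a
 * posted write of (offset | seq_num) to ENA_REGS_MMIO_REG_READ_OFF; the
 * device answers by DMA-writing {req_id = seq_num, reg_off, reg_val} into
 * the response buffer, which the driver polls for up to
 * ENA_REG_READ_TIMEOUT iterations. Seeding req_id with seq_num + 0xDEAD
 * guarantees a stale buffer can never match the new sequence number.
 */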
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_info("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
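
/*
 * Usage sketch (hypothetical caller, for illustration only): fetching a
 * feature that needs no control buffer, e.g. the device attributes also
 * queried by ena_com_get_dev_attr_feat() later in this file:
 *
 *	struct ena_admin_get_feat_resp resp;
 *	int rc = ena_com_get_feature(ena_dev, &resp,
 *				     ENA_ADMIN_DEVICE_ATTRIBUTES);
 *	if (!rc)
 *		pr_info("supported features: 0x%x\n",
 *			resp.u.dev_attr.supported_features);
 */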
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	size_t i;
	u16 qid;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
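
/*
 * Worked example (editor's note): if the device reports an interrupt delay
 * resolution of 4 usec per unit, a table entry initialized to an interval
 * of 64 (usec) is rescaled above to 64 / 4 = 16 device units, so the value
 * later written to the device still represents 64 usec of delay.
 */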
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
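
/*
 * Usage sketch (hypothetical caller, for illustration only): every admin
 * operation in this file funnels through ena_com_execute_admin_command()
 * with its request/response structs cast to the generic AQ/ACQ entries:
 *
 *	struct ena_admin_set_feat_cmd cmd = { ... };
 *	struct ena_admin_set_feat_resp resp;
 *	int rc = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *			(struct ena_admin_aq_entry *)&cmd, sizeof(cmd),
 *			(struct ena_admin_acq_entry *)&resp, sizeof(resp));
 */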
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
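
/*
 * Editor's note on the bring-up order above: completion contexts first,
 * then the admin SQ/CQ allocations, then the doorbell, base-address and
 * caps registers, and finally the AENQ. Any failure funnels through the
 * error label, so a partially initialized admin queue is torn down by
 * ena_com_admin_destroy(), which tolerates NULL entries.
 */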
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
	memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
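
/*
 * Editor's note: creation order is CQ before SQ because the SQ create
 * command carries the index of the CQ (cq_idx) it completes into; teardown
 * in ena_com_destroy_io_queue() below runs in the reverse order, SQ first.
 */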
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
	       phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
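
/*
 * Usage sketch (hypothetical, for illustration only): a driver provides
 * the dispatch table consumed by ena_com_get_specific_aenq_cb(). The field
 * names follow the usage above; the group constant and callback names are
 * assumed examples:
 *
 *	static void ena_link_change_cb(void *data,
 *				       struct ena_admin_aenq_entry *e) { ... }
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = ena_link_change_cb,
 *		},
 *		.unimplemented_handler = unimplemented_aenq_handler,
 *	};
 */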
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	return 0;
}
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_info("Feature %d isn't supported\n",
			ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func &
	      (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
			       key_len, sizeof(hash_key->key));
			return -EINVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	/* Non-IP traffic can only hash on the L2 addresses */
	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EPERM;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
		return -EPERM;
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}
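
/*
 * Usage sketch (illustrative only): enabling 4-tuple hashing for TCP over
 * IPv4, e.g. from an ethtool set-rxnfc handler:
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 */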

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
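
/*
 * Usage sketch (illustrative only): spreading the host indirection table
 * round-robin over num_queues RX queues and then pushing it to the device.
 * The loop bound and the queue-index mapping below are assumptions made
 * for the example, not requirements of this API:
 *
 *	for (i = 0; i < (1 << rss_tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */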

int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_info("Feature %d isn't supported\n",
			ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EPERM;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
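
/*
 * Usage sketch (illustrative only): a log size of 7 (128 entries) is used
 * here purely as an example. Bring-up pairs with ena_com_rss_destroy() on
 * the teardown path, and the error label is hypothetical:
 *
 *	rc = ena_com_rss_init(ena_dev, 7);
 *	if (unlikely(rc))
 *		goto err_rss;
 */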

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_HOST_ATTR_CONFIG)) {
		pr_warn("Set host attribute isn't supported\n");
		return -EPERM;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
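
/*
 * Usage sketch (illustrative only): the attributes are pushed after the
 * buffers are allocated and the host info is filled in. os_type is a
 * field of struct ena_admin_host_info; the error label is hypothetical:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (unlikely(rc))
 *		goto err;
 *
 *	ena_dev->host_attr.host_info->os_type = ENA_ADMIN_OS_LINUX;
 *	rc = ena_com_set_host_attributes(ena_dev);
 */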

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use LOWEST entry of moderation table for storing
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
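
/*
 * Worked example: if the device reports an interrupt delay resolution of
 * 2 usec and ethtool requests rx-usecs = 64, the LOWEST table entry stores
 * 64 / 2 = 32 device units; ena_com_get_intr_moderation_entry() applies
 * the inverse scaling when the value is read back.
 */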

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EPERM) {
			pr_info("Feature %d isn't supported\n",
				ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}
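
/*
 * Usage sketch (illustrative only): typically called on the device init
 * path once the admin queue is up; on success the default table can then
 * be installed. The error label is hypothetical:
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (unlikely(rc))
 *		goto err_moderation;
 *
 *	ena_com_config_default_interrupt_moderation_table(ena_dev);
 */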

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
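
/*
 * Usage sketch (illustrative only): mirroring the LOWEST entry into an
 * ethtool get-coalesce reply; 'coalesce' is assumed to be a
 * struct ethtool_coalesce supplied by the caller:
 *
 *	struct ena_intr_moder_entry entry;
 *
 *	ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_LOWEST,
 *					  &entry);
 *	coalesce->rx_coalesce_usecs = entry.intr_moder_interval;
 *	coalesce->rx_max_coalesced_frames = entry.pkts_per_interval;
 */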