/*-
 * BSD LICENSE
 *
 * Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "ena_com.h"
/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (1000000)

#define ENA_ASYNC_QUEUE_DEPTH 4
#define ENA_ADMIN_QUEUE_DEPTH 32
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
                ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
                | (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR          0
#define ENA_CTRL_MINOR          0
#define ENA_CTRL_SUB_MINOR      1

#define MIN_ENA_CTRL_VER \
        (((ENA_CTRL_MAJOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
        ((ENA_CTRL_MINOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
        (ENA_CTRL_SUB_MINOR))
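/* For example, with the values above the minimal controller version 0.0.1
 * packs as (0 << MAJOR_SHIFT) | (0 << MINOR_SHIFT) | 1; this is compared
 * against the implementation-id-masked controller version register in
 * ena_com_validate_version() below.
 */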
#define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
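/* ena_com_reg_bar_read32() returns ENA_MMIO_READ_TIMEOUT (all ones) when a
 * readless register request is not answered in time, so every caller checks
 * the returned value against this sentinel before using it.
 */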
static int ena_alloc_cnt;
/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
        ENA_CMD_SUBMITTED,
        ENA_CMD_COMPLETED,
        /* Abort - canceled by the driver */
        ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
        ena_wait_event_t wait_event;
        struct ena_admin_acq_entry *user_cqe;
        u32 comp_size;
        enum ena_cmd_status status;
        /* status from the device */
        u8 comp_status;
        u8 cmd_opcode;
        bool occupied;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                       struct ena_common_mem_addr *ena_addr,
                                       dma_addr_t addr)
{
        if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
                ena_trc_err("dma address has more bits than the device supports\n");
                return ENA_COM_INVAL;
        }

        ena_addr->mem_addr_low = (u32)addr;
        ena_addr->mem_addr_high =
                ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);

        return 0;
}
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
        ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
                               ADMIN_SQ_SIZE(queue->q_depth),
                               queue->sq.entries,
                               queue->sq.dma_addr,
                               queue->sq.mem_handle);

        if (!queue->sq.entries) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        queue->sq.head = 0;
        queue->sq.tail = 0;
        queue->sq.phase = 1;

        queue->sq.db_addr = NULL;

        return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
        ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
                               ADMIN_CQ_SIZE(queue->q_depth),
                               queue->cq.entries,
                               queue->cq.dma_addr,
                               queue->cq.mem_handle);

        if (!queue->cq.entries) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        queue->cq.head = 0;
        queue->cq.phase = 1;

        return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                                   struct ena_aenq_handlers *aenq_handlers)
{
        u32 addr_low, addr_high, aenq_caps;

        dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
        ENA_MEM_ALLOC_COHERENT(dev->dmadev,
                               ADMIN_AENQ_SIZE(dev->aenq.q_depth),
                               dev->aenq.entries,
                               dev->aenq.dma_addr,
                               dev->aenq.mem_handle);

        if (!dev->aenq.entries) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        dev->aenq.head = dev->aenq.q_depth;
        dev->aenq.phase = 1;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);

        ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
                        + ENA_REGS_AENQ_BASE_LO_OFF);
        ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
                        + ENA_REGS_AENQ_BASE_HI_OFF);

        aenq_caps = 0;
        aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
        aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
                ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
                ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;

        ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
                        + ENA_REGS_AENQ_CAPS_OFF);

        if (unlikely(!aenq_handlers))
                ena_trc_err("aenq handlers pointer is NULL\n");

        dev->aenq.aenq_handlers = aenq_handlers;

        return 0;
}
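/* Completion context handling: each in-flight admin command owns one
 * ena_comp_ctx slot, claimed in get_comp_ctxt() with capture == true at
 * submission time and returned to the pool by comp_ctxt_release() once its
 * completion has been consumed.
 */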
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
                                     struct ena_comp_ctx *comp_ctx)
{
        comp_ctx->occupied = false;
        ATOMIC32_DEC(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)
{
        if (unlikely(command_id >= queue->q_depth)) {
                ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                            command_id, queue->q_depth);
                return NULL;
        }

        if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
                ena_trc_err("Completion context is occupied\n");
                return NULL;
        }

        if (capture) {
                ATOMIC32_INC(&queue->outstanding_cmds);
                queue->comp_ctx[command_id].occupied = true;
        }

        return &queue->comp_ctx[command_id];
}
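/* __ena_com_submit_admin_cmd() assumes the caller holds the admin queue
 * lock: it stamps the current phase bit and command id into the descriptor,
 * records the expected completion in the command's context, copies the
 * descriptor into the SQ and rings the SQ doorbell.
 */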
static struct ena_comp_ctx *
__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                           struct ena_admin_aq_entry *cmd,
                           size_t cmd_size_in_bytes,
                           struct ena_admin_acq_entry *comp,
                           size_t comp_size_in_bytes)
{
        struct ena_comp_ctx *comp_ctx;
        u16 tail_masked, cmd_id;
        u16 queue_size_mask;
        u16 cnt;

        queue_size_mask = admin_queue->q_depth - 1;

        tail_masked = admin_queue->sq.tail & queue_size_mask;

        /* In case of queue FULL */
        cnt = admin_queue->sq.tail - admin_queue->sq.head;
        if (cnt >= admin_queue->q_depth) {
                ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
                            admin_queue->sq.tail,
                            admin_queue->sq.head,
                            admin_queue->q_depth);
                admin_queue->stats.out_of_space++;
                return ERR_PTR(ENA_COM_NO_SPACE);
        }

        cmd_id = admin_queue->curr_cmd_id;

        cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
                ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

        cmd->aq_common_descriptor.command_id |= cmd_id &
                ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);

        comp_ctx->status = ENA_CMD_SUBMITTED;
        comp_ctx->comp_size = (u32)comp_size_in_bytes;
        comp_ctx->user_cqe = comp;
        comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

        ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

        memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

        admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
                queue_size_mask;

        admin_queue->sq.tail++;
        admin_queue->stats.submitted_cmd++;

        /* Flip the phase bit on every queue wrap-around */
        if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
                admin_queue->sq.phase = !admin_queue->sq.phase;

        ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);

        return comp_ctx;
}
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
        size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
        if (unlikely(!queue->comp_ctx)) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        for (i = 0; i < queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(queue, i, false);
                ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
        }

        return 0;
}
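/* ena_com_submit_admin_cmd() is the locked wrapper around
 * __ena_com_submit_admin_cmd(): it serializes submitters on q_lock, refuses
 * new commands once running_state is false, and hands back either a valid
 * completion context or an ERR_PTR() code.
 */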
static struct ena_comp_ctx *
ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                         struct ena_admin_aq_entry *cmd,
                         size_t cmd_size_in_bytes,
                         struct ena_admin_acq_entry *comp,
                         size_t comp_size_in_bytes)
{
        unsigned long flags = 0;
        struct ena_comp_ctx *comp_ctx;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        if (unlikely(!admin_queue->running_state)) {
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                return ERR_PTR(ENA_COM_NO_DEVICE);
        }
        comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
                                              cmd_size_in_bytes,
                                              comp,
                                              comp_size_in_bytes);
        if (unlikely(IS_ERR(comp_ctx)))
                admin_queue->running_state = false;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

        return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)
{
        size_t size;
        int dev_node = 0;

        memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
                sizeof(struct ena_eth_io_rx_desc);

        size = io_sq->desc_entry_size * io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                            size,
                                            io_sq->desc_addr.virt_addr,
                                            io_sq->desc_addr.phys_addr,
                                            ctx->numa_node,
                                            dev_node);
                if (!io_sq->desc_addr.virt_addr)
                        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                               size,
                                               io_sq->desc_addr.virt_addr,
                                               io_sq->desc_addr.phys_addr,
                                               io_sq->desc_addr.mem_handle);
        } else {
                ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
                                   size,
                                   io_sq->desc_addr.virt_addr,
                                   ctx->numa_node,
                                   dev_node);
                if (!io_sq->desc_addr.virt_addr)
                        io_sq->desc_addr.virt_addr =
                                ENA_MEM_ALLOC(ena_dev->dmadev, size);
        }

        if (!io_sq->desc_addr.virt_addr) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        io_sq->tail = 0;
        io_sq->next_to_comp = 0;
        io_sq->phase = 1;

        return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)
{
        size_t size;
        int dev_node = 0;

        memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));

        /* Use the basic completion descriptor for Rx */
        io_cq->cdesc_entry_size_in_bytes =
                (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_cdesc) :
                sizeof(struct ena_eth_io_rx_cdesc_base);

        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

        ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
                                    size,
                                    io_cq->cdesc_addr.virt_addr,
                                    io_cq->cdesc_addr.phys_addr,
                                    ctx->numa_node,
                                    dev_node);
        if (!io_cq->cdesc_addr.virt_addr)
                ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                                       size,
                                       io_cq->cdesc_addr.virt_addr,
                                       io_cq->cdesc_addr.phys_addr,
                                       io_cq->cdesc_addr.mem_handle);

        if (!io_cq->cdesc_addr.virt_addr) {
                ena_trc_err("memory allocation failed\n");
                return ENA_COM_NO_MEM;
        }

        io_cq->phase = 1;
        io_cq->head = 0;

        return 0;
}
static void
ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                       struct ena_admin_acq_entry *cqe)
{
        struct ena_comp_ctx *comp_ctx;
        u16 cmd_id;

        cmd_id = cqe->acq_common_descriptor.command &
                ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
        if (unlikely(!comp_ctx)) {
                ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
                admin_queue->running_state = false;
                return;
        }

        comp_ctx->status = ENA_CMD_COMPLETED;
        comp_ctx->comp_status = cqe->acq_common_descriptor.status;

        if (comp_ctx->user_cqe)
                memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

        if (!admin_queue->polling)
                ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
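/* ena_com_handle_admin_completion() walks the completion queue entry by
 * entry: an entry belongs to the driver only while its phase bit matches the
 * queue's current phase, and the expected phase is inverted each time the
 * head wraps around the ring.
 */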
static void
ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
        struct ena_admin_acq_entry *cqe = NULL;
        u16 comp_num = 0;
        u16 head_masked;
        u8 phase;

        head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
        phase = admin_queue->cq.phase;

        cqe = &admin_queue->cq.entries[head_masked];

        /* Go over all the completions */
        while ((cqe->acq_common_descriptor.flags &
                ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
                rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);

                head_masked++;
                comp_num++;
                if (unlikely(head_masked == admin_queue->q_depth)) {
                        head_masked = 0;
                        phase = !phase;
                }

                cqe = &admin_queue->cq.entries[head_masked];
        }

        admin_queue->cq.head += comp_num;
        admin_queue->cq.phase = phase;
        admin_queue->sq.head += comp_num;
        admin_queue->stats.completed_cmd += comp_num;
}
static int ena_com_comp_status_to_errno(u8 comp_status)
{
        if (unlikely(comp_status != 0))
                ena_trc_err("admin command failed[%u]\n", comp_status);

        if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
                return ENA_COM_INVAL;

        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
                return 0;
        case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
                return ENA_COM_NO_MEM;
        case ENA_ADMIN_UNSUPPORTED_OPCODE:
                return ENA_COM_PERMISSION;
        case ENA_ADMIN_BAD_OPCODE:
        case ENA_ADMIN_MALFORMED_REQUEST:
        case ENA_ADMIN_ILLEGAL_PARAMETER:
        case ENA_ADMIN_UNKNOWN_ERROR:
                return ENA_COM_INVAL;
        }

        return 0;
}
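/* Polling-mode wait: repeatedly reap the completion queue under q_lock until
 * the command context leaves ENA_CMD_SUBMITTED, giving up after
 * ADMIN_CMD_TIMEOUT_US microseconds.
 */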
static int
ena_com_wait_and_process_admin_cq_polling(
        struct ena_comp_ctx *comp_ctx,
        struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags = 0;
        u64 start_time;
        int ret;

        start_time = ENA_GET_SYSTEM_USECS();

        while (comp_ctx->status == ENA_CMD_SUBMITTED) {
                if ((ENA_GET_SYSTEM_USECS() - start_time) >
                    ADMIN_CMD_TIMEOUT_US) {
                        ena_trc_err("Wait for completion (polling) timeout\n");
                        /* ENA didn't have any completion */
                        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                        admin_queue->stats.no_completion++;
                        admin_queue->running_state = false;
                        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

                        ret = ENA_COM_TIMER_EXPIRED;
                        goto err;
                }

                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
        }

        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
                ena_trc_err("Command was aborted\n");
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                admin_queue->stats.aborted_cmd++;
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                ret = ENA_COM_NO_DEVICE;
                goto err;
        }

        ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
                   "Invalid comp status %d\n", comp_ctx->status);

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}
static int
ena_com_wait_and_process_admin_cq_interrupts(
        struct ena_comp_ctx *comp_ctx,
        struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags = 0;
        int ret = 0;

        ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
                            ADMIN_CMD_TIMEOUT_US);

        /* In case the command wasn't completed find out the root cause.
         * There might be 2 kinds of errors
         * 1) No completion (timeout reached)
         * 2) There is completion but the driver didn't receive the MSI-X
         *    interrupt.
         */
        if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                admin_queue->stats.no_completion++;
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

                if (comp_ctx->status == ENA_CMD_COMPLETED)
                        ena_trc_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
                                    comp_ctx->cmd_opcode);
                else
                        ena_trc_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
                                    comp_ctx->cmd_opcode, comp_ctx->status);

                admin_queue->running_state = false;
                ret = ENA_COM_TIMER_EXPIRED;
                goto err;
        }

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}
/* This method reads the hardware device register by posting writes
 * and waiting for the response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret;
        unsigned long flags = 0;
        int i;

        /* If readless is disabled, perform regular read */
        if (!mmio_read->readless_supported)
                return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
                                      offset);

        ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
        mmio_read->seq_num++;

        read_resp->req_id = mmio_read->seq_num + 0xDEAD;
        mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
                ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
        mmio_read_reg |= mmio_read->seq_num &
                ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

        /* make sure read_resp->req_id gets updated before the hw can write
         * to it
         */
        wmb();

        ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_MMIO_REG_READ_OFF);

        for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
                if (read_resp->req_id == mmio_read->seq_num)
                        break;
        }

        if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
                ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
                            mmio_read->seq_num,
                            offset,
                            read_resp->req_id,
                            read_resp->reg_off);
                ret = ENA_MMIO_READ_TIMEOUT;
                goto err;
        }

        if (read_resp->reg_off != offset) {
                ena_trc_err("reading failed for wrong offset value\n");
                ret = ENA_MMIO_READ_TIMEOUT;
        } else {
                ret = read_resp->reg_val;
        }
err:
        ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

        return ret;
}
/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int
ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                  struct ena_com_admin_queue *admin_queue)
{
        if (admin_queue->polling)
                return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
                                                                 admin_queue);

        return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
                                                            admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
        u8 direction;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        destroy_cmd.sq.sq_identity |= (direction <<
                ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

        destroy_cmd.sq.sq_idx = io_sq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

        ret = ena_com_execute_admin_command(
                admin_queue,
                (struct ena_admin_aq_entry *)&destroy_cmd,
                sizeof(destroy_cmd),
                (struct ena_admin_acq_entry *)&destroy_resp,
                sizeof(destroy_resp));

        if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
                ena_trc_err("failed to destroy io sq error: %d\n", ret);

        return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)
{
        size_t size;

        if (io_cq->cdesc_addr.virt_addr) {
                size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      size,
                                      io_cq->cdesc_addr.virt_addr,
                                      io_cq->cdesc_addr.phys_addr,
                                      io_cq->cdesc_addr.mem_handle);

                io_cq->cdesc_addr.virt_addr = NULL;
        }

        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;

                if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                              size,
                                              io_sq->desc_addr.virt_addr,
                                              io_sq->desc_addr.phys_addr,
                                              io_sq->desc_addr.mem_handle);
                else
                        ENA_MEM_FREE(ena_dev->dmadev,
                                     io_sq->desc_addr.virt_addr);

                io_sq->desc_addr.virt_addr = NULL;
        }
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev,
                                u32 timeout, u16 exp_state)
{
        u32 val, i;

        for (i = 0; i < timeout; i++) {
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
                        ena_trc_err("Reg read timeout occurred\n");
                        return ENA_COM_TIMER_EXPIRED;
                }

                if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
                        exp_state)
                        return 0;

                /* The resolution of the timeout is 100ms */
                ENA_MSLEEP(100);
        }

        return ENA_COM_TIMER_EXPIRED;
}
static bool
ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                   enum ena_admin_aq_feature_id feature_id)
{
        u32 feature_mask = 1 << feature_id;

        /* Device attributes is always supported */
        if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
            !(ena_dev->supported_features & feature_mask))
                return false;

        return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_get_feat_cmd get_cmd;
        int ret;

        if (!ena_dev) {
                ena_trc_err("%s : ena_dev is NULL\n", __func__);
                return ENA_COM_NO_DEVICE;
        }

        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
                ena_trc_info("Feature %d isn't supported\n", feature_id);
                return ENA_COM_PERMISSION;
        }

        memset(&get_cmd, 0x0, sizeof(get_cmd));
        admin_queue = &ena_dev->admin_queue;

        get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

        if (control_buff_size)
                get_cmd.aq_common_descriptor.flags =
                        ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        else
                get_cmd.aq_common_descriptor.flags = 0;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &get_cmd.control_buffer.address,
                                   control_buf_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        get_cmd.control_buffer.length = control_buff_size;

        get_cmd.feat_common.feature_id = feature_id;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)
                                            &get_cmd,
                                            sizeof(get_cmd),
                                            (struct ena_admin_acq_entry *)
                                            get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                ena_trc_err("Failed to submit get_feature command %d error: %d\n",
                            feature_id, ret);

        return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id)
{
        return ena_com_get_feature_ex(ena_dev,
                                      get_resp,
                                      feature_id,
                                      0,
                                      0);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*rss->hash_key),
                               rss->hash_key,
                               rss->hash_key_dma_addr,
                               rss->hash_key_mem_handle);

        if (unlikely(!rss->hash_key))
                return ENA_COM_NO_MEM;

        return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_key)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      sizeof(*rss->hash_key),
                                      rss->hash_key,
                                      rss->hash_key_dma_addr,
                                      rss->hash_key_mem_handle);
        rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*rss->hash_ctrl),
                               rss->hash_ctrl,
                               rss->hash_ctrl_dma_addr,
                               rss->hash_ctrl_mem_handle);

        if (unlikely(!rss->hash_ctrl))
                return ENA_COM_NO_MEM;

        return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_ctrl)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      sizeof(*rss->hash_ctrl),
                                      rss->hash_ctrl,
                                      rss->hash_ctrl_dma_addr,
                                      rss->hash_ctrl_mem_handle);
        rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
                                           u16 log_size)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        size_t tbl_size;
        int ret;

        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
        if (unlikely(ret))
                return ret;

        if ((get_resp.u.ind_table.min_size > log_size) ||
            (get_resp.u.ind_table.max_size < log_size)) {
                ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
                            1 << log_size,
                            1 << get_resp.u.ind_table.min_size,
                            1 << get_resp.u.ind_table.max_size);
                return ENA_COM_INVAL;
        }

        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               tbl_size,
                               rss->rss_ind_tbl,
                               rss->rss_ind_tbl_dma_addr,
                               rss->rss_ind_tbl_mem_handle);
        if (unlikely(!rss->rss_ind_tbl))
                goto mem_err1;

        tbl_size = (1ULL << log_size) * sizeof(u16);
        rss->host_rss_ind_tbl =
                ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
        if (unlikely(!rss->host_rss_ind_tbl))
                goto mem_err2;

        rss->tbl_log_size = log_size;

        return 0;

mem_err2:
        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              tbl_size,
                              rss->rss_ind_tbl,
                              rss->rss_ind_tbl_dma_addr,
                              rss->rss_ind_tbl_mem_handle);
        rss->rss_ind_tbl = NULL;
mem_err1:
        rss->tbl_log_size = 0;
        return ENA_COM_NO_MEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        size_t tbl_size = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        if (rss->rss_ind_tbl)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      tbl_size,
                                      rss->rss_ind_tbl,
                                      rss->rss_ind_tbl_dma_addr,
                                      rss->rss_ind_tbl_mem_handle);
        rss->rss_ind_tbl = NULL;

        if (rss->host_rss_ind_tbl)
                ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
        rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_sq_cmd create_cmd;
        struct ena_admin_acq_create_sq_resp_desc cmd_completion;
        u8 direction;
        int ret;

        memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        create_cmd.sq_identity |= (direction <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

        create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

        create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

        create_cmd.sq_caps_3 |=
                ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

        create_cmd.cq_idx = cq_idx;
        create_cmd.sq_depth = io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ret = ena_com_mem_addr_set(ena_dev,
                                           &create_cmd.sq_ba,
                                           io_sq->desc_addr.phys_addr);
                if (unlikely(ret)) {
                        ena_trc_err("memory address set failed\n");
                        return ret;
                }
        }

        ret = ena_com_execute_admin_command(
                admin_queue,
                (struct ena_admin_aq_entry *)&create_cmd,
                sizeof(create_cmd),
                (struct ena_admin_acq_entry *)&cmd_completion,
                sizeof(cmd_completion));
        if (unlikely(ret)) {
                ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
                return ret;
        }

        io_sq->idx = cmd_completion.sq_idx;

        io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                (uintptr_t)cmd_completion.sq_doorbell_offset);

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
                                + cmd_completion.llq_headers_offset);

                io_sq->desc_addr.pbuf_dev_addr =
                        (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
                        cmd_completion.llq_descriptors_offset);
        }

        ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

        return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_com_io_sq *io_sq;
        size_t i;
        u16 qid;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                qid = rss->host_rss_ind_tbl[i];
                if (qid >= ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;

                io_sq = &ena_dev->io_sq_queues[qid];

                if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
                        return ENA_COM_INVAL;

                rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
        }

        return 0;
}
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
        u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
        struct ena_rss *rss = &ena_dev->rss;
        u8 idx;
        u16 i;

        for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
                dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;
                idx = (u8)rss->rss_ind_tbl[i].cq_idx;

                if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
                        return ENA_COM_INVAL;

                rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
        }

        return 0;
}
*ena_dev
)
1125 size
= sizeof(struct ena_intr_moder_entry
) * ENA_INTR_MAX_NUM_OF_LEVELS
;
1127 ena_dev
->intr_moder_tbl
= ENA_MEM_ALLOC(ena_dev
->dmadev
, size
);
1128 if (!ena_dev
->intr_moder_tbl
)
1129 return ENA_COM_NO_MEM
;
1131 ena_com_config_default_interrupt_moderation_table(ena_dev
);
static void
ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                     u16 intr_delay_resolution)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
        unsigned int i;

        if (!intr_delay_resolution) {
                ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
                intr_delay_resolution = 1;
        }
        ena_dev->intr_delay_resolution = intr_delay_resolution;

        /* update Rx */
        for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
                intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

        /* update Tx */
        ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
/*****************************************************************************/
/*********************************** API *************************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  size_t cmd_size,
                                  struct ena_admin_acq_entry *comp,
                                  size_t comp_size)
{
        struct ena_comp_ctx *comp_ctx;
        int ret;

        comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                            comp, comp_size);
        if (unlikely(IS_ERR(comp_ctx))) {
                ena_trc_err("Failed to submit command [%ld]\n",
                            PTR_ERR(comp_ctx));
                return PTR_ERR(comp_ctx);
        }

        ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
        if (unlikely(ret)) {
                if (admin_queue->running_state)
                        ena_trc_err("Failed to process command. ret = %d\n",
                                    ret);
                else
                        ena_trc_dbg("Failed to process command. ret = %d\n",
                                    ret);
        }
        return ret;
}
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_cq_cmd create_cmd;
        struct ena_admin_acq_create_cq_resp_desc cmd_completion;
        int ret;

        memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

        create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
                ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
        create_cmd.cq_caps_1 |=
                ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

        create_cmd.msix_vector = io_cq->msix_vector;
        create_cmd.cq_depth = io_cq->q_depth;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &create_cmd.cq_ba,
                                   io_cq->cdesc_addr.phys_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_execute_admin_command(
                admin_queue,
                (struct ena_admin_aq_entry *)&create_cmd,
                sizeof(create_cmd),
                (struct ena_admin_acq_entry *)&cmd_completion,
                sizeof(cmd_completion));
        if (unlikely(ret)) {
                ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
                return ret;
        }

        io_cq->idx = cmd_completion.cq_idx;

        io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                cmd_completion.cq_interrupt_unmask_register_offset);

        if (cmd_completion.cq_head_db_register_offset)
                io_cq->cq_head_db_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.cq_head_db_register_offset);

        if (cmd_completion.numa_node_register_offset)
                io_cq->numa_node_cfg_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.numa_node_register_offset);

        ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

        return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)
{
        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Invalid queue number %d but the max is %d\n",
                            qid, ENA_TOTAL_NUM_QUEUES);
                return ENA_COM_INVAL;
        }

        *io_sq = &ena_dev->io_sq_queues[qid];
        *io_cq = &ena_dev->io_cq_queues[qid];

        return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        if (!admin_queue->comp_ctx)
                return;

        for (i = 0; i < admin_queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(admin_queue, i, false);
                if (unlikely(!comp_ctx))
                        break;

                comp_ctx->status = ENA_CMD_ABORTED;

                ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
        }
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
                ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
                ENA_MSLEEP(20);
                ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        }
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));

        destroy_cmd.cq_idx = io_cq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

        ret = ena_com_execute_admin_command(
                admin_queue,
                (struct ena_admin_aq_entry *)&destroy_cmd,
                sizeof(destroy_cmd),
                (struct ena_admin_acq_entry *)&destroy_resp,
                sizeof(destroy_resp));

        if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
                ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

        return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
        return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
        ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
        u16 depth = ena_dev->aenq.q_depth;

        ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");

        /* Init head_db to mark that all entries in the queue
         * are initially available
         */
        ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        if (unlikely(!ena_dev)) {
                ena_trc_err("%s : ena_dev is NULL\n", __func__);
                return ENA_COM_NO_DEVICE;
        }

        ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
        if (ret) {
                ena_trc_info("Can't get aenq configuration\n");
                return ret;
        }

        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
                ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
                             get_resp.u.aenq.supported_groups,
                             groups_flag);
                return ENA_COM_PERMISSION;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
        cmd.u.aenq.enabled_groups = groups_flag;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                ena_trc_err("Failed to config AENQ ret: %d\n", ret);

        return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
        u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
        int width;

        if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;
        }

        width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
                ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

        ena_trc_dbg("ENA dma width: %d\n", width);

        if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
                ena_trc_err("DMA width illegal value: %d\n", width);
                return ENA_COM_INVAL;
        }

        ena_dev->dma_addr_bits = width;

        return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
        u32 ver;
        u32 ctrl_ver;
        u32 ctrl_ver_masked;

        /* Make sure the ENA version and the controller version are at least
         * as the driver expects
         */
        ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
        ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                          ENA_REGS_CONTROLLER_VERSION_OFF);

        if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
                     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;
        }

        ena_trc_info("ena device version: %d.%d\n",
                     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
                     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

        if (ver < MIN_ENA_VER) {
                ena_trc_err("ENA version is lower than the minimal version the driver supports\n");
                return -1;
        }

        ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
                     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
                     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
                     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
                     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

        ctrl_ver_masked =
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

        /* Validate the ctrl version without the implementation ID */
        if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
                ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
                return -1;
        }

        return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;

        if (admin_queue->comp_ctx)
                ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
        admin_queue->comp_ctx = NULL;

        if (admin_queue->sq.entries)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      ADMIN_SQ_SIZE(admin_queue->q_depth),
                                      admin_queue->sq.entries,
                                      admin_queue->sq.dma_addr,
                                      admin_queue->sq.mem_handle);
        admin_queue->sq.entries = NULL;

        if (admin_queue->cq.entries)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      ADMIN_CQ_SIZE(admin_queue->q_depth),
                                      admin_queue->cq.entries,
                                      admin_queue->cq.dma_addr,
                                      admin_queue->cq.mem_handle);
        admin_queue->cq.entries = NULL;

        if (ena_dev->aenq.entries)
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
                                      ena_dev->aenq.entries,
                                      ena_dev->aenq.dma_addr,
                                      ena_dev->aenq.mem_handle);
        ena_dev->aenq.entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
        ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        ENA_SPINLOCK_INIT(mmio_read->lock);
        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               sizeof(*mmio_read->read_resp),
                               mmio_read->read_resp,
                               mmio_read->read_resp_dma_addr,
                               mmio_read->read_resp_mem_handle);
        if (unlikely(!mmio_read->read_resp))
                return ENA_COM_NO_MEM;

        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        mmio_read->read_resp->req_id = 0x0;
        mmio_read->seq_num = 0x0;
        mmio_read->readless_supported = true;

        return 0;
}
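/* ena_com_set_mmio_read_mode() lets the caller opt out of the readless
 * (posted-write) register protocol; with readless_supported == false,
 * ena_com_reg_bar_read32() falls back to plain BAR reads.
 */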
void
ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_MMIO_RESP_LO_OFF);
        ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_MMIO_RESP_HI_OFF);

        ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                              sizeof(*mmio_read->read_resp),
                              mmio_read->read_resp,
                              mmio_read->read_resp_dma_addr,
                              mmio_read->read_resp_mem_handle);

        mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        u32 addr_low, addr_high;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

        ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_MMIO_RESP_LO_OFF);
        ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
                       struct ena_aenq_handlers *aenq_handlers,
                       bool init_spinlock)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
        int ret;

        dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

        if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
                ena_trc_err("Reg read timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;
        }

        if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
                ena_trc_err("Device isn't ready, abort com init\n");
                return ENA_COM_NO_DEVICE;
        }

        admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

        admin_queue->q_dmadev = ena_dev->dmadev;
        admin_queue->polling = false;
        admin_queue->curr_cmd_id = 0;

        ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

        if (init_spinlock)
                ENA_SPINLOCK_INIT(admin_queue->q_lock);

        ret = ena_com_init_comp_ctxt(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_sq(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_cq(admin_queue);
        if (ret)
                goto error;

        admin_queue->sq.db_addr = (u32 __iomem *)
                ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

        ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_AQ_BASE_LO_OFF);
        ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_AQ_BASE_HI_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

        ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_ACQ_BASE_LO_OFF);
        ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_ACQ_BASE_HI_OFF);

        aq_caps = 0;
        aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
        aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
                ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
                ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

        acq_caps = 0;
        acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
        acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
                ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
                ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

        ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_AQ_CAPS_OFF);
        ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_ACQ_CAPS_OFF);
        ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
        if (ret)
                goto error;

        admin_queue->running_state = true;

        return 0;
error:
        ena_com_admin_destroy(ena_dev);

        return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
                            struct ena_com_create_io_ctx *ctx)
{
        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;
        int ret;

        if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                            ctx->qid, ENA_TOTAL_NUM_QUEUES);
                return ENA_COM_INVAL;
        }

        io_sq = &ena_dev->io_sq_queues[ctx->qid];
        io_cq = &ena_dev->io_cq_queues[ctx->qid];

        memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
        memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));

        /* Init CQ */
        io_cq->q_depth = ctx->queue_size;
        io_cq->direction = ctx->direction;
        io_cq->qid = ctx->qid;

        io_cq->msix_vector = ctx->msix_vector;

        io_sq->q_depth = ctx->queue_size;
        io_sq->direction = ctx->direction;
        io_sq->qid = ctx->qid;

        io_sq->mem_queue_type = ctx->mem_queue_type;

        if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                /* header length is limited to 8 bits */
                io_sq->tx_max_header_size =
                        ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

        ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
        if (ret)
                goto error;
        ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_cq(ena_dev, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
        if (ret)
                goto destroy_io_cq;

        return 0;

destroy_io_cq:
        ena_com_destroy_io_cq(ena_dev, io_cq);
error:
        ena_com_io_queue_free(ena_dev, io_sq, io_cq);
        return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;

        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
                            qid, ENA_TOTAL_NUM_QUEUES);
                return;
        }

        io_sq = &ena_dev->io_sq_queues[qid];
        io_cq = &ena_dev->io_cq_queues[qid];

        ena_com_destroy_io_sq(ena_dev, io_sq);
        ena_com_destroy_io_cq(ena_dev, io_cq);

        ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
                            struct ena_admin_get_feat_resp *resp)
{
        return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_DEVICE_ATTRIBUTES);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
               sizeof(get_resp.u.dev_attr));
        ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_MAX_QUEUES_NUM);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
               sizeof(get_resp.u.max_queue));
        ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_AENQ_CONFIG);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
               sizeof(get_resp.u.aenq));

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
               sizeof(get_resp.u.offload));

        return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
        ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
                                                     u16 group)
{
        struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

        if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
                return aenq_handlers->handlers[group];

        return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq = &dev->aenq;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;

        masked_head = aenq->head & (aenq->q_depth - 1);
        phase = aenq->phase;
        aenq_e = &aenq->entries[masked_head]; /* Get first entry */
        aenq_common = &aenq_e->aenq_common_desc;

        /* Go over all the events */
        while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
                phase) {
                ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                            aenq_common->group,
                            aenq_common->syndrom,
                            (unsigned long long)aenq_common->timestamp_low +
                            ((u64)aenq_common->timestamp_high << 32));

                /* Handle specific event*/
                handler_cb = ena_com_get_specific_aenq_cb(dev,
                                                          aenq_common->group);
                handler_cb(data, aenq_e); /* call the actual event handler*/

                /* Get next event entry */
                masked_head++;
                processed++;

                if (unlikely(masked_head == aenq->q_depth)) {
                        masked_head = 0;
                        phase = !phase;
                }
                aenq_e = &aenq->entries[masked_head];
                aenq_common = &aenq_e->aenq_common_desc;
        }

        aenq->head += processed;
        aenq->phase = phase;

        /* Don't update aenq doorbell if there weren't any processed events */
        if (!processed)
                return;

        /* write the aenq doorbell after all AENQ descriptors were read */
        mb();
        ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
                        + ENA_REGS_AENQ_HEAD_DB_OFF);
}
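/* Device reset handshake: set the reset bit, wait for the device to report
 * RESET_IN_PROGRESS, clear the bit, then wait for the indication to drop.
 * The timeout is taken from the capabilities register, and its polling
 * resolution is 100ms (see wait_for_reset_state()).
 */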
int ena_com_dev_reset(struct ena_com_dev *ena_dev)
{
        u32 stat, timeout, cap, reset_val;
        int rc;

        stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

        if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
                     (cap == ENA_MMIO_READ_TIMEOUT))) {
                ena_trc_err("Reg read32 timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;
        }

        if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
                ena_trc_err("Device isn't ready, can't reset device\n");
                return ENA_COM_INVAL;
        }

        timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
                ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
        if (timeout == 0) {
                ena_trc_err("Invalid timeout value\n");
                return ENA_COM_INVAL;
        }

        /* start reset */
        reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
        ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_DEV_CTL_OFF);

        /* Write again the MMIO read request address */
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        rc = wait_for_reset_state(ena_dev, timeout,
                                  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
        if (rc != 0) {
                ena_trc_err("Reset indication didn't turn on\n");
                return rc;
        }

        /* reset done */
        ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
                        + ENA_REGS_DEV_CTL_OFF);
        rc = wait_for_reset_state(ena_dev, timeout, 0);
        if (rc != 0) {
                ena_trc_err("Reset indication didn't turn off\n");
                return rc;
        }

        return 0;
}
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
                             struct ena_admin_aq_get_stats_cmd *get_cmd,
                             struct ena_admin_acq_get_stats_resp *get_resp,
                             enum ena_admin_get_stats_type type)
{
        struct ena_com_admin_queue *admin_queue;
        int ret;

        if (!ena_dev) {
                ena_trc_err("%s : ena_dev is NULL\n", __func__);
                return ENA_COM_NO_DEVICE;
        }

        admin_queue = &ena_dev->admin_queue;

        get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
        get_cmd->aq_common_descriptor.flags = 0;
        get_cmd->type = type;

        ret = ena_com_execute_admin_command(
                admin_queue,
                (struct ena_admin_aq_entry *)get_cmd,
                sizeof(*get_cmd),
                (struct ena_admin_acq_entry *)get_resp,
                sizeof(*get_resp));

        if (unlikely(ret))
                ena_trc_err("Failed to get stats. error: %d\n", ret);

        return ret;
}
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
                                struct ena_admin_basic_stats *stats)
{
        int ret = 0;
        struct ena_admin_aq_get_stats_cmd get_cmd;
        struct ena_admin_acq_get_stats_resp get_resp;

        memset(&get_cmd, 0x0, sizeof(get_cmd));
        ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
                                ENA_ADMIN_GET_STATS_TYPE_BASIC);
        if (likely(ret == 0))
                memcpy(stats, &get_resp.basic_stats,
                       sizeof(get_resp.basic_stats));

        return ret;
}
int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
                                   u32 len)
{
        int ret = 0;
        struct ena_admin_aq_get_stats_cmd get_cmd;
        struct ena_admin_acq_get_stats_resp get_resp;
        ena_mem_handle_t mem_handle = 0;
        void *virt_addr;
        dma_addr_t phys_addr;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
                               virt_addr, phys_addr, mem_handle);
        if (!virt_addr) {
                ret = ENA_COM_NO_MEM;
                goto done;
        }
        memset(&get_cmd, 0x0, sizeof(get_cmd));
        ret = ena_com_mem_addr_set(ena_dev,
                                   &get_cmd.u.control_buffer.address,
                                   phys_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                goto free_ext_stats_mem;
        }
        get_cmd.u.control_buffer.length = len;

        get_cmd.device_id = ena_dev->stats_func;
        get_cmd.queue_idx = ena_dev->stats_queue;

        ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
                                ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
        if (ret < 0)
                goto free_ext_stats_mem;

        ret = snprintf(buff, len, "%s", (char *)virt_addr);

free_ext_stats_mem:
        ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
                              mem_handle);
done:
        return ret;
}
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (unlikely(!ena_dev)) {
                ena_trc_err("%s : ena_dev is NULL\n", __func__);
                return ENA_COM_NO_DEVICE;
        }

        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
                ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return ENA_COM_PERMISSION;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_MTU;
        cmd.u.mtu.mtu = mtu;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret)) {
                ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
                return ENA_COM_INVAL;
        }
        return 0;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
                                 struct ena_admin_feature_offload_desc *offload)
{
        int ret;
        struct ena_admin_get_feat_resp resp;

        ret = ena_com_get_feature(ena_dev, &resp,
                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
        if (unlikely(ret)) {
                ena_trc_err("Failed to get offload capabilities %d\n", ret);
                return ENA_COM_INVAL;
        }

        memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

        return 0;
}
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_HASH_FUNCTION);
		return ENA_COM_PERMISSION;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	/* The device reports supported functions as a bitmask; reject the
	 * request when the selected function's bit isn't set.
	 */
	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
		ena_trc_err("Func hash %d isn't supported by device, abort\n",
			    rss->hash_func);
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash function %d. error: %d\n",
			    rss->hash_func, ret);
		return ENA_COM_INVAL;
	}

	return 0;
}
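
/* Cache a new hash function configuration and apply it. key_len must
 * be a multiple of 4 (the key is stored as dwords); only Toeplitz uses
 * the key, CRC32 only consumes init_val. On failure the previous
 * device configuration is read back to keep the host cache consistent.
 */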
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return ENA_COM_INVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		ena_trc_err("Flow hash function %d isn't supported\n", func);
		return ENA_COM_PERMISSION;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key_len > sizeof(hash_key->key)) {
			ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
				    key_len, sizeof(hash_key->key));
			return ENA_COM_INVAL;
		}

		memcpy(hash_key->key, key, key_len);
		rss->hash_init_val = init_val;
		hash_key->keys_num = key_len >> 2;
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		ena_trc_err("Invalid hash function (%d)\n", func);
		return ENA_COM_INVAL;
	}

	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}
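
/* Read the selected hash function (and optionally the key) back from
 * the device; func and key may be NULL when the caller only wants to
 * refresh the cached state.
 */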
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	rss->hash_func =
		(enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
	if (func)
		*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}
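
/* Refresh the cached flow hash control table from the device and,
 * when fields is non-NULL, return the fields selected for proto.
 */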
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}
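
/* Push the cached flow hash control table to the device. The table is
 * passed indirectly via rss->hash_ctrl_dma_addr.
 */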
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_HASH_INPUT);
		return ENA_COM_PERMISSION;
	}

	/* Zero the descriptor so unused fields aren't sent as garbage */
	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set hash input. error: %d\n", ret);
		ret = ENA_COM_INVAL;
	}

	return ret;
}
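
/* Program a default hash input for each protocol: the 4-tuple for
 * TCP/UDP, the IP source/destination addresses for plain and fragmented
 * IP, and the L2 addresses for non-IP traffic. Every selection is
 * validated against the fields the device reports as supported.
 */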
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	/* Non-IP traffic falls back to hashing the L2 addresses */
	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
				    i, hash_ctrl->supported_fields[i].fields,
				    hash_ctrl->selected_fields[i].fields);
			return ENA_COM_PERMISSION;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

	return rc;
}
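
/* Select the hash input fields for a single protocol and push the
 * updated table to the device. Illustrative call, enabling 4-tuple
 * hashing for TCP/IPv4:
 *
 *	ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_TCP4,
 *			       ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *			       ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 */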
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		ena_trc_err("Invalid proto num (%u)\n", proto);
		return ENA_COM_INVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
			    proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);

	return rc;
}
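
/* Update one entry of the host-side RSS indirection table after bounds
 * checking the index and queue value. A sketch of typical use, where
 * tbl_size and num_queues are caller-known values:
 *
 *	for (i = 0; i < tbl_size; i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % num_queues);
 */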
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return ENA_COM_INVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return ENA_COM_INVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}
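
/* Convert the host indirection table to the device representation and
 * push it with a SET_FEATURE command; the table body travels through
 * the control buffer at rss->rss_ind_tbl_dma_addr.
 */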
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
				ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		ena_trc_info("Feature %d isn't supported\n",
			     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return ENA_COM_PERMISSION;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		ena_trc_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		ena_trc_err("Failed to set indirect table. error: %d\n", ret);
		return ENA_COM_INVAL;
	}

	return 0;
}
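
/* Read the indirection table back from the device and translate it to
 * host queue ids; ind_tbl may be NULL to refresh only the cache.
 */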
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}
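
/* Allocate all RSS state: indirection table, hash key and hash control.
 * Each failure path unwinds the allocations made before it.
 */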
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}
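
/* Free all RSS state and clear the cached configuration */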
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}
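
/* Host attributes: host info and debug area buffers shared with the
 * device for reporting driver/OS information and debug data.
 */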
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	/* The host info buffer occupies a single 4KB page */
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       SZ_4K,
			       host_attr->host_info,
			       host_attr->host_info_dma_addr,
			       host_attr->host_info_dma_handle);
	if (unlikely(!host_attr->host_info))
		return ENA_COM_NO_MEM;

	return 0;
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       debug_area_size,
			       host_attr->debug_area_virt_addr,
			       host_attr->debug_area_dma_addr,
			       host_attr->debug_area_dma_handle);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return ENA_COM_NO_MEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}
void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      SZ_4K,
				      host_attr->host_info,
				      host_attr->host_info_dma_addr,
				      host_attr->host_info_dma_handle);
		host_attr->host_info = NULL;
	}
}
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      host_attr->debug_area_size,
				      host_attr->debug_area_virt_addr,
				      host_attr->debug_area_dma_addr,
				      host_attr->debug_area_dma_handle);
		host_attr->debug_area_virt_addr = NULL;
	}
}
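
/* Hand the host info and debug area base addresses, plus the debug
 * area size, to the device in one HOST_ATTR_CONFIG command.
 */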
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (unlikely(!ena_dev)) {
		ena_trc_err("%s : ena_dev is NULL\n", __func__);
		return ENA_COM_NO_DEVICE;
	}

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_HOST_ATTR_CONFIG)) {
		ena_trc_warn("Set host attribute isn't supported\n");
		return ENA_COM_PERMISSION;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		ena_trc_err("Failed to set host attributes: %d\n", ret);

	return ret;
}
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}
int
ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						  u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}
int
ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						  u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		ena_trc_err("Illegal interrupt delay granularity value\n");
		return ENA_COM_FAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing the
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}
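
/* Probe the device's interrupt moderation support. When available,
 * allocate the moderation table and enable adaptive moderation;
 * otherwise disable adaptive moderation and return.
 */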
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == ENA_COM_PERMISSION) {
			ena_trc_info("Feature %d isn't supported\n",
				     ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
				    rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by device we set adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);
	return rc;
}
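
/* Fill the five moderation levels with the compile-time default
 * interval/packet/byte thresholds.
 */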
void
ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}
unsigned int
ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}
unsigned int
ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}
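
/* Store a caller-supplied moderation entry at the given level,
 * converting the interval from usecs to device units using
 * intr_delay_resolution. The getter below performs the inverse
 * conversion.
 */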
void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
	intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}