/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/
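/* Illustrative arithmetic for the macros above (explanatory only, not part
 * of the protocol): with ENA_CTRL_MAJOR/MINOR/SUB_MINOR = 0/0/1,
 * MIN_ENA_CTRL_VER packs the three fields into a single u32 using the shift
 * constants from ena_regs.h, so a device reporting controller version 0.0.0
 * compares lower than 0.0.1 and is rejected by ena_com_validate_version()
 * below. Likewise, for a DMA address such as 0x123456789ULL,
 * ENA_DMA_ADDR_TO_UINT32_LOW() yields 0x23456789 and
 * ENA_DMA_ADDR_TO_UINT32_HIGH() yields 0x1.
 */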
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}
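/* Usage sketch for the check above (illustrative; assumes the device
 * reported a 48-bit DMA width, see ena_com_get_dma_width()): with
 * dma_addr_bits == 48, GENMASK_ULL(47, 0) masks off everything above bit
 * 47, so an address with higher bits set fails fast with -EINVAL instead
 * of being silently truncated when the low/high halves are written into
 * the (u32 low / u16 high) descriptor fields.
 */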
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}
static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}
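/* Worked example of the ring bookkeeping above (illustrative only): with
 * q_depth == ENA_ADMIN_QUEUE_DEPTH (32), queue_size_mask == 31. sq.tail
 * counts submissions monotonically, and the entry slot is tail & 31.
 * Whenever tail crosses a multiple of 32 the sq.phase bit flips, which is
 * how the device tells freshly written descriptors apart from stale ones
 * left over from the previous lap around the ring.
 */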
static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}
static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}
static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
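/* Consumer-side view of the phase protocol (illustrative): the device
 * writes each completion entry with the current phase bit, so the loop
 * above stops at the first entry whose phase doesn't match - an entry the
 * device hasn't written on this lap yet. After consuming q_depth entries
 * the driver toggles its expected phase, mirroring the producer-side logic
 * in __ena_com_submit_admin_cmd().
 */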
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		pr_err("Invalid header location control, supported: 0x%x\n",
		       supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}

	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("illegal entry size %d\n",
		       llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return 0;
}
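/* Sizing example for the LLQ parameters chosen above (illustrative, and
 * assuming the Tx descriptor is 16 bytes, which holds for
 * struct ena_eth_io_tx_desc in this driver generation): a 128B desc list
 * entry with ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY gives descs_per_entry =
 * 128 / 16 = 8, and with descs_num_before_header == 2 the first two
 * descriptors are followed by the inlined packet header inside the same
 * 128B line that is pushed to the device.
 */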
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}
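/* Readless read, step by step (illustrative; assumes the req-id/offset
 * field split defined by the ENA_REGS_MMIO_REG_READ_* constants in
 * ena_regs.h): reading offset 0x10 with seq_num 3 posts
 * ((0x10 << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) | 3) to the
 * MMIO_REG_READ register. The device then DMA-writes {req_id = 3,
 * reg_off = 0x10, reg_val} into read_resp, and the polling loop above
 * matches req_id against seq_num. Pre-seeding req_id with seq_num + 0xDEAD
 * guarantees a stale response can never match the request being waited on.
 */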
/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}
static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		msleep(ENA_POLL_MS);
	}

	return -ETIME;
}
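/* Conversion example (illustrative, assuming ENA_POLL_MS == 5 as defined
 * at the top of this file): a device reset-timeout field of 2 means 200 ms,
 * which the division above turns into 2 * 100 / 5 = 40 polling iterations,
 * each separated by msleep(ENA_POLL_MS).
 */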
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}
static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}
static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}
static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}
static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}
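/* Illustrative mapping: if host_rss_ind_tbl[0] == 5 (RSS bucket 0 steers
 * to host queue 5), the loop above rewrites device entry 0 to
 * io_sq_queues[5].idx, i.e. the device-assigned index returned by the
 * ENA_ADMIN_CREATE_SQ completion, since the device only understands its
 * own queue indices. The next function performs the inverse translation.
 */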
static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}
static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}
/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}
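/* Typical caller pattern (sketch only; it mirrors the real callers such as
 * ena_com_set_dev_mtu() and ena_com_set_aenq_config() later in this file):
 *
 *	struct ena_admin_set_feat_cmd cmd;
 *	struct ena_admin_set_feat_resp resp;
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	cmd.feat_common.feature_id = <feature>;
 *	... fill feature-specific fields in cmd.u ...
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */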
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(ENA_POLL_MS);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}
bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}
void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}
int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}
void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}
void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so if the command
	 * isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
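/* Doorbell arithmetic for the handler above (illustrative): aenq->head is
 * monotonic, so with q_depth == ENA_ASYNC_QUEUE_DEPTH (16) and head == 16
 * right after ena_com_admin_aenq_enable() (all entries available),
 * processing 3 events advances head to 19; writing 19 to AENQ_HEAD_DB
 * tells the device that slots 16 & 15 (i.e. ring slots 0..2) may be
 * reused for new events.
 */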
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
                      enum ena_regs_reset_reason_types reset_reason)
{
        u32 stat, timeout, cap, reset_val;
        int rc;

        stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

        if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
                     (cap == ENA_MMIO_READ_TIMEOUT))) {
                pr_err("Reg read32 timeout occurred\n");
                return -ETIME;
        }

        if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
                pr_err("Device isn't ready, can't reset device\n");
                return -EINVAL;
        }

        timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
                  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
        if (timeout == 0) {
                pr_err("Invalid timeout value\n");
                return -EINVAL;
        }

        /* start reset */
        reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
        reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
                     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
        writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

        /* Write again the MMIO read request address */
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        rc = wait_for_reset_state(ena_dev, timeout,
                                  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
        if (rc != 0) {
                pr_err("Reset indication didn't turn on\n");
                return rc;
        }

        /* reset done */
        writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
        rc = wait_for_reset_state(ena_dev, timeout, 0);
        if (rc != 0) {
                pr_err("Reset indication didn't turn off\n");
                return rc;
        }

        timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
                  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
        if (timeout)
                /* the resolution of timeout reg is 100ms */
                ena_dev->admin_queue.completion_timeout = timeout * 100000;
        else
                ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

        return 0;
}

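/*
 * Illustrative sketch (not part of the driver): a typical reset invocation
 * as seen from a client. ENA_REGS_RESET_NORMAL is one of the
 * enum ena_regs_reset_reason_types values; quiescing I/O beforehand is the
 * client's responsibility. Kept inside #if 0 so it is never compiled.
 */
#if 0
static int example_trigger_reset(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
        if (rc)
                pr_err("Device reset failed\n");

        return rc;
}
#endif
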
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
                             struct ena_com_stats_ctx *ctx,
                             enum ena_admin_get_stats_type type)
{
        struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
        struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
        struct ena_com_admin_queue *admin_queue;
        int ret;

        admin_queue = &ena_dev->admin_queue;

        get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
        get_cmd->aq_common_descriptor.flags = 0;
        get_cmd->type = type;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)get_cmd,
                                            sizeof(*get_cmd),
                                            (struct ena_admin_acq_entry *)get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                pr_err("Failed to get stats. error: %d\n", ret);

        return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
                                struct ena_admin_basic_stats *stats)
{
        struct ena_com_stats_ctx ctx;
        int ret;

        memset(&ctx, 0x0, sizeof(ctx));
        ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
        if (likely(ret == 0))
                memcpy(stats, &ctx.get_resp.basic_stats,
                       sizeof(ctx.get_resp.basic_stats));

        return ret;
}

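/*
 * Illustrative sketch (not part of the driver): reading the basic stats and
 * combining a 32-bit high/low counter pair. The rx_bytes_low/rx_bytes_high
 * field names are assumed from struct ena_admin_basic_stats. Kept inside
 * #if 0 so it is never compiled.
 */
#if 0
static u64 example_read_rx_bytes(struct ena_com_dev *ena_dev)
{
        struct ena_admin_basic_stats stats;

        if (ena_com_get_dev_basic_stats(ena_dev, &stats))
                return 0;

        return ((u64)stats.rx_bytes_high << 32) | stats.rx_bytes_low;
}
#endif
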
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
                pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return -EOPNOTSUPP;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_MTU;
        cmd.u.mtu.mtu = mtu;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

        return ret;
}

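/*
 * Illustrative sketch (not part of the driver): how a client typically calls
 * ena_com_set_dev_mtu from its MTU-change path. Kept inside #if 0 so it is
 * never compiled.
 */
#if 0
static int example_change_mtu(struct ena_com_dev *ena_dev, int new_mtu)
{
        int rc;

        rc = ena_com_set_dev_mtu(ena_dev, new_mtu);
        if (rc)
                pr_err("Failed to set MTU to %d\n", new_mtu);

        return rc;
}
#endif
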
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
                                 struct ena_admin_feature_offload_desc *offload)
{
        struct ena_admin_get_feat_resp resp;
        int ret;

        ret = ena_com_get_feature(ena_dev, &resp,
                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
        if (unlikely(ret)) {
                pr_err("Failed to get offload capabilities %d\n", ret);
                return ret;
        }

        memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

        return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_FUNCTION)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_HASH_FUNCTION);
                return -EOPNOTSUPP;
        }

        /* Validate hash function is supported */
        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_HASH_FUNCTION);
        if (unlikely(ret))
                return ret;

        if (!(get_resp.u.flow_hash_func.supported_func &
              (1 << rss->hash_func))) {
                pr_err("Func hash %d isn't supported by device, abort\n",
                       rss->hash_func);
                return -EOPNOTSUPP;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
        cmd.u.flow_hash_func.init_val = rss->hash_init_val;
        cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_key_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = sizeof(*rss->hash_key);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret)) {
                pr_err("Failed to set hash function %d. error: %d\n",
                       rss->hash_func, ret);
                ret = -EINVAL;
        }

        return ret;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
                               enum ena_admin_hash_functions func,
                               const u8 *key, u16 key_len, u32 init_val)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                rss->hash_key;
        int rc;

        /* Make sure size is a mult of DWs */
        if (unlikely(key_len & 0x3))
                return -EINVAL;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key));
        if (unlikely(rc))
                return rc;

        if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
                pr_err("Flow hash function %d isn't supported\n", func);
                return -EOPNOTSUPP;
        }

        switch (func) {
        case ENA_ADMIN_TOEPLITZ:
                if (key_len > sizeof(hash_key->key)) {
                        pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
                               key_len, sizeof(hash_key->key));
                        return -EINVAL;
                }

                memcpy(hash_key->key, key, key_len);
                rss->hash_init_val = init_val;
                hash_key->keys_num = key_len >> 2;
                break;
        case ENA_ADMIN_CRC32:
                rss->hash_init_val = init_val;
                break;
        default:
                pr_err("Invalid hash function (%d)\n", func);
                return -EINVAL;
        }

        rc = ena_com_set_hash_function(ena_dev);

        /* Restore the old function */
        if (unlikely(rc))
                ena_com_get_hash_function(ena_dev, NULL, NULL);

        return rc;
}

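/*
 * Illustrative sketch (not part of the driver): programming a Toeplitz hash
 * key via the helper above. The key bytes are arbitrary placeholders; the
 * length must be a multiple of 4 and no larger than sizeof(hash_key->key).
 * Kept inside #if 0 so it is never compiled.
 */
#if 0
static int example_set_toeplitz_key(struct ena_com_dev *ena_dev)
{
        static const u8 key[40] = { 0x6d, 0x5a, /* ... arbitrary bytes ... */ };

        return ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
                                          key, sizeof(key), 0xFFFFFFFF);
}
#endif
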
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
                              enum ena_admin_hash_functions *func,
                              u8 *key)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                rss->hash_key;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key));
        if (unlikely(rc))
                return rc;

        rss->hash_func = get_resp.u.flow_hash_func.selected_func;
        if (func)
                *func = rss->hash_func;

        if (key)
                memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

        return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
                          enum ena_admin_flow_hash_proto proto,
                          u16 *fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_INPUT,
                                    rss->hash_ctrl_dma_addr,
                                    sizeof(*rss->hash_ctrl));
        if (unlikely(rc))
                return rc;

        if (fields)
                *fields = rss->hash_ctrl->selected_fields[proto].fields;

        return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_HASH_INPUT);
                return -EOPNOTSUPP;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
        cmd.u.flow_hash_input.enabled_input_sort =
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_ctrl_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }
        cmd.control_buffer.length = sizeof(*hash_ctrl);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret))
                pr_err("Failed to set hash input. error: %d\n", ret);

        return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl =
                rss->hash_ctrl;
        u16 available_fields = 0;
        int rc, i;

        /* Get the supported hash input */
        rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
        if (unlikely(rc))
                return rc;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
                ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

        for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
                available_fields = hash_ctrl->selected_fields[i].fields &
                                   hash_ctrl->supported_fields[i].fields;
                if (available_fields != hash_ctrl->selected_fields[i].fields) {
                        pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
                               i, hash_ctrl->supported_fields[i].fields,
                               hash_ctrl->selected_fields[i].fields);
                        return -EOPNOTSUPP;
                }
        }

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
                           enum ena_admin_flow_hash_proto proto,
                           u16 hash_fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        u16 supported_fields;
        int rc;

        if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
                pr_err("Invalid proto num (%u)\n", proto);
                return -EINVAL;
        }

        /* Get the ctrl table */
        rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
        if (unlikely(rc))
                return rc;

        /* Make sure all the fields are supported */
        supported_fields = hash_ctrl->supported_fields[proto].fields;
        if ((hash_fields & supported_fields) != hash_fields) {
                pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
                       proto, hash_fields, supported_fields);
        }

        hash_ctrl->selected_fields[proto].fields = hash_fields;

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
                                      u16 entry_idx, u16 entry_value)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
                return -EINVAL;

        if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
                return -EINVAL;

        rss->host_rss_ind_tbl[entry_idx] = entry_value;

        return 0;
}

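/*
 * Illustrative sketch (not part of the driver): spreading the indirection
 * table across num_queues queues round-robin and then pushing it to the
 * device with ena_com_indirect_table_set(). The queue-value encoding is the
 * client's choice as long as it stays within ENA_TOTAL_NUM_QUEUES. Kept
 * inside #if 0 so it is never compiled.
 */
#if 0
static int example_fill_indir_round_robin(struct ena_com_dev *ena_dev,
                                          u16 tbl_log_size, u16 num_queues)
{
        u16 i;
        int rc;

        for (i = 0; i < (1 << tbl_log_size); i++) {
                rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                                       i % num_queues);
                if (unlikely(rc))
                        return rc;
        }

        return ena_com_indirect_table_set(ena_dev);
}
#endif
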
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(
                    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
                pr_debug("Feature %d isn't supported\n",
                         ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
                return -EOPNOTSUPP;
        }

        ret = ena_com_ind_tbl_convert_to_device(ena_dev);
        if (ret) {
                pr_err("Failed to convert host indirection table to device table\n");
                return ret;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
        cmd.u.ind_table.size = rss->tbl_log_size;
        cmd.u.ind_table.inline_index = 0xFFFFFFFF;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->rss_ind_tbl_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
                                    sizeof(struct ena_admin_rss_ind_table_entry);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set indirect table. error: %d\n", ret);

        return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        u32 tbl_size;
        int i, rc;

        tbl_size = (1ULL << rss->tbl_log_size) *
                   sizeof(struct ena_admin_rss_ind_table_entry);

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
                                    rss->rss_ind_tbl_dma_addr,
                                    tbl_size);
        if (unlikely(rc))
                return rc;

        if (!ind_tbl)
                return 0;

        rc = ena_com_ind_tbl_convert_from_device(ena_dev);
        if (unlikely(rc))
                return rc;

        for (i = 0; i < (1 << rss->tbl_log_size); i++)
                ind_tbl[i] = rss->host_rss_ind_tbl[i];

        return 0;
}

int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
        int rc;

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

        rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
        if (unlikely(rc))
                goto err_indr_tbl;

        rc = ena_com_hash_key_allocate(ena_dev);
        if (unlikely(rc))
                goto err_hash_key;

        rc = ena_com_hash_ctrl_init(ena_dev);
        if (unlikely(rc))
                goto err_hash_ctrl;

        return 0;

err_hash_ctrl:
        ena_com_hash_key_destroy(ena_dev);
err_hash_key:
        ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
        return rc;
}

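/*
 * Illustrative sketch (not part of the driver): the usual RSS bring-up order
 * built from the helpers in this file. EXAMPLE_RSS_LOG_SIZE is a hypothetical
 * client-side constant, not something defined here. Kept inside #if 0 so it
 * is never compiled.
 */
#if 0
#define EXAMPLE_RSS_LOG_SIZE 7	/* 128-entry indirection table */

static int example_rss_bring_up(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_rss_init(ena_dev, EXAMPLE_RSS_LOG_SIZE);
        if (rc)
                return rc;

        /* Program default hash inputs; tear down RSS state on failure */
        rc = ena_com_set_default_hash_ctrl(ena_dev);
        if (rc)
                ena_com_rss_destroy(ena_dev);

        return rc;
}
#endif
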
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
        ena_com_indirect_table_destroy(ena_dev);
        ena_com_hash_key_destroy(ena_dev);
        ena_com_hash_ctrl_destroy(ena_dev);

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        host_attr->host_info =
                dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
                                    &host_attr->host_info_dma_addr, GFP_KERNEL);
        if (unlikely(!host_attr->host_info))
                return -ENOMEM;

        host_attr->host_info->ena_spec_version =
                ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
                 (ENA_COMMON_SPEC_VERSION_MINOR));

        return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
                                u32 debug_area_size)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        host_attr->debug_area_virt_addr =
                dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
                                    &host_attr->debug_area_dma_addr, GFP_KERNEL);
        if (unlikely(!host_attr->debug_area_virt_addr)) {
                host_attr->debug_area_size = 0;
                return -ENOMEM;
        }

        host_attr->debug_area_size = debug_area_size;

        return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->host_info) {
                dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
                                  host_attr->host_info_dma_addr);
                host_attr->host_info = NULL;
        }
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->debug_area_virt_addr) {
                dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
                                  host_attr->debug_area_virt_addr,
                                  host_attr->debug_area_dma_addr);
                host_attr->debug_area_virt_addr = NULL;
        }
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        /* Host attribute config is called before ena_com_get_dev_attr_feat
         * so ena_com can't check if the feature is supported.
         */

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.debug_ba,
                                   host_attr->debug_area_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.os_info_ba,
                                   host_attr->host_info_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set host attributes: %d\n", ret);

        return ret;
}

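/*
 * Illustrative sketch (not part of the driver): the allocate -> fill -> set
 * flow for host attributes, with error unwinding. The debug area size is an
 * arbitrary placeholder; populating the host_info fields is left to the
 * client. Kept inside #if 0 so it is never compiled.
 */
#if 0
static int example_config_host_info(struct ena_com_dev *ena_dev)
{
        int rc;

        rc = ena_com_allocate_host_info(ena_dev);
        if (rc)
                return rc;

        rc = ena_com_allocate_debug_area(ena_dev, SZ_4K);
        if (rc)
                goto err_free_host_info;

        /* ... client fills ena_dev->host_attr.host_info fields here ... */

        rc = ena_com_set_host_attributes(ena_dev);
        if (rc)
                goto err_free_debug_area;

        return 0;

err_free_debug_area:
        ena_com_delete_debug_area(ena_dev);
err_free_host_info:
        ena_com_delete_host_info(ena_dev);
        return rc;
}
#endif
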
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
        return ena_com_check_supported_feature_id(ena_dev,
                                                  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
                                                      u32 tx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                pr_err("Illegal interrupt delay granularity value\n");
                return -EFAULT;
        }

        ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
                ena_dev->intr_delay_resolution;

        return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
                                                      u32 rx_coalesce_usecs)
{
        if (!ena_dev->intr_delay_resolution) {
                pr_err("Illegal interrupt delay granularity value\n");
                return -EFAULT;
        }

        /* We use LOWEST entry of moderation table for storing
         * nonadaptive interrupt coalescing values
         */
        ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                rx_coalesce_usecs / ena_dev->intr_delay_resolution;

        return 0;
}

void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        if (ena_dev->intr_moder_tbl)
                devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
        ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        struct ena_admin_get_feat_resp get_resp;
        u16 delay_resolution;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_INTERRUPT_MODERATION);

        if (rc) {
                if (rc == -EOPNOTSUPP) {
                        pr_debug("Feature %d isn't supported\n",
                                 ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
                               rc);
                }

                /* no moderation supported, disable adaptive support */
                ena_com_disable_adaptive_moderation(ena_dev);
                return rc;
        }

        rc = ena_com_init_interrupt_moderation_table(ena_dev);
        if (rc)
                goto err;

        /* if moderation is supported by device we set adaptive moderation */
        delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
        ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
        ena_com_enable_adaptive_moderation(ena_dev);

        return 0;
err:
        ena_com_destroy_interrupt_moderation(ena_dev);

        return rc;
}

void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (!intr_moder_tbl)
                return;

        intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
                ENA_INTR_LOWEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
                ENA_INTR_LOWEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
                ENA_INTR_LOWEST_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
                ENA_INTR_LOW_USECS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
                ENA_INTR_LOW_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
                ENA_INTR_LOW_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
                ENA_INTR_MID_USECS;
        intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
                ENA_INTR_MID_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
                ENA_INTR_MID_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
                ENA_INTR_HIGH_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
                ENA_INTR_HIGH_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
                ENA_INTR_HIGH_BYTES;

        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
                ENA_INTR_HIGHEST_USECS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
                ENA_INTR_HIGHEST_PKTS;
        intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
                ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                        enum ena_intr_moder_level level,
                                        struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                intr_moder_tbl[level].intr_moder_interval /=
                        ena_dev->intr_delay_resolution;
        intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

        /* use hardcoded value until ethtool supports bytecount parameter */
        if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
                intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                       enum ena_intr_moder_level level,
                                       struct ena_intr_moder_entry *entry)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

        if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
                return;

        entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
        if (ena_dev->intr_delay_resolution)
                entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
        entry->pkts_per_interval =
                intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}

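/*
 * Illustrative sketch (not part of the driver): the unit conversion the two
 * helpers above perform. Assuming intr_delay_resolution == 4 (the device
 * counts in 4us units), a requested 64us interval is stored as 64 / 4 = 16
 * device units and read back multiplied to 64us again. Kept inside #if 0 so
 * it is never compiled.
 */
#if 0
static void example_moderation_round_trip(struct ena_com_dev *ena_dev)
{
        struct ena_intr_moder_entry entry = {
                .intr_moder_interval = 64,      /* requested, in usecs */
                .pkts_per_interval = 48,
                .bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED,
        };

        /* stored as 64 / intr_delay_resolution device units */
        ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);

        /* read back multiplied by intr_delay_resolution -> 64 usecs again */
        ena_com_get_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID, &entry);
}
#endif
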
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
                            struct ena_admin_feature_llq_desc *llq_features,
                            struct ena_llq_configurations *llq_default_cfg)
{
        int size;
        int rc;

        if (!llq_features->max_llq_num) {
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
                return 0;
        }

        rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
        if (rc)
                return rc;

        /* Validate the descriptor is not too big */
        size = ena_dev->tx_max_header_size;
        size += ena_dev->llq_info.descs_num_before_header *
                sizeof(struct ena_eth_io_tx_desc);

        if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
                pr_err("the size of the LLQ entry is smaller than needed\n");
                return -EINVAL;
        }

        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

        return 0;
}
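
/*
 * Illustrative sketch (not part of the driver): the LLQ entry-size check
 * above in concrete numbers, assuming a 16-byte ena_eth_io_tx_desc. With a
 * 96-byte max header and 2 descriptors placed before the header, the push
 * entry needs 96 + 2 * 16 = 128 bytes, so a 128-byte desc_list_entry_size
 * just fits. Kept inside #if 0 so it is never compiled.
 */
#if 0
static bool example_llq_entry_fits(u32 desc_list_entry_size,
                                   u32 max_header_size,
                                   u32 descs_num_before_header)
{
        u32 needed = max_header_size +
                     descs_num_before_header *
                             sizeof(struct ena_eth_io_tx_desc);

        return desc_list_entry_size >= needed;
}
#endif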