ceph/src/seastar/dpdk/drivers/net/ena/base/ena_com.c
1/*-
2* BSD LICENSE
3*
4* Copyright (c) 2015-2016 Amazon.com, Inc. or its affiliates.
5* All rights reserved.
6*
7* Redistribution and use in source and binary forms, with or without
8* modification, are permitted provided that the following conditions
9* are met:
10*
11* * Redistributions of source code must retain the above copyright
12* notice, this list of conditions and the following disclaimer.
13* * Redistributions in binary form must reproduce the above copyright
14* notice, this list of conditions and the following disclaimer in
15* the documentation and/or other materials provided with the
16* distribution.
17* * Neither the name of copyright holder nor the names of its
18* contributors may be used to endorse or promote products derived
19* from this software without specific prior written permission.
20*
21* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32*/
33
34#include "ena_com.h"
35
36/*****************************************************************************/
37/*****************************************************************************/
38
39/* Timeout in micro-sec */
40#define ADMIN_CMD_TIMEOUT_US (3000000)
41
42#define ENA_ASYNC_QUEUE_DEPTH 16
43#define ENA_ADMIN_QUEUE_DEPTH 32
44
45
46#define ENA_CTRL_MAJOR 0
47#define ENA_CTRL_MINOR 0
48#define ENA_CTRL_SUB_MINOR 1
49
50#define MIN_ENA_CTRL_VER \
51 (((ENA_CTRL_MAJOR) << \
52 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
53 ((ENA_CTRL_MINOR) << \
54 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
55 (ENA_CTRL_SUB_MINOR))
56
57#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
58#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
59
60#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
61
62#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
63
64#define ENA_REGS_ADMIN_INTR_MASK 1
65
66#define ENA_POLL_MS 5
67
68/*****************************************************************************/
69/*****************************************************************************/
70/*****************************************************************************/
71
72enum ena_cmd_status {
73 ENA_CMD_SUBMITTED,
74 ENA_CMD_COMPLETED,
75 /* Abort - canceled by the driver */
76 ENA_CMD_ABORTED,
77};
78
79struct ena_comp_ctx {
80 ena_wait_event_t wait_event;
81 struct ena_admin_acq_entry *user_cqe;
82 u32 comp_size;
83 enum ena_cmd_status status;
84 /* status from the device */
85 u8 comp_status;
86 u8 cmd_opcode;
87 bool occupied;
88};
89
90struct ena_com_stats_ctx {
91 struct ena_admin_aq_get_stats_cmd get_cmd;
92 struct ena_admin_acq_get_stats_resp get_resp;
93};
94
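/* Validate that a DMA address fits within the device's advertised DMA width
 * and split it into the low/high words of struct ena_common_mem_addr.
 */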
95static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
96 struct ena_common_mem_addr *ena_addr,
97 dma_addr_t addr)
98{
99 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
100 ena_trc_err("dma address has more bits that the device supports\n");
101 return ENA_COM_INVAL;
102 }
103
104 ena_addr->mem_addr_low = lower_32_bits(addr);
105 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
106
107 return 0;
108}
109
110static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111{
112 struct ena_com_admin_sq *sq = &queue->sq;
113 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
114
115 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
116 sq->mem_handle);
117
118 if (!sq->entries) {
119 ena_trc_err("memory allocation failed");
120 return ENA_COM_NO_MEM;
121 }
122
123 sq->head = 0;
124 sq->tail = 0;
125 sq->phase = 1;
126
127 sq->db_addr = NULL;
128
129 return 0;
130}
131
132static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133{
134 struct ena_com_admin_cq *cq = &queue->cq;
135 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
136
137 ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
138 cq->mem_handle);
139
140 if (!cq->entries) {
141 ena_trc_err("memory allocation failed");
142 return ENA_COM_NO_MEM;
143 }
144
145 cq->head = 0;
146 cq->phase = 1;
147
148 return 0;
149}
150
151static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
152 struct ena_aenq_handlers *aenq_handlers)
153{
154 struct ena_com_aenq *aenq = &dev->aenq;
155 u32 addr_low, addr_high, aenq_caps;
156 u16 size;
157
158 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
159 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
160 ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
161 aenq->entries,
162 aenq->dma_addr,
163 aenq->mem_handle);
164
165 if (!aenq->entries) {
166 ena_trc_err("memory allocation failed");
167 return ENA_COM_NO_MEM;
168 }
169
170 aenq->head = aenq->q_depth;
171 aenq->phase = 1;
172
173 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
174 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
175
176 ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
177 ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
178
179 aenq_caps = 0;
180 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
181 aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
182 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
183 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
184 ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
185
186 if (unlikely(!aenq_handlers)) {
187 ena_trc_err("aenq handlers pointer is NULL\n");
188 return ENA_COM_INVAL;
189 }
190
191 aenq->aenq_handlers = aenq_handlers;
192
193 return 0;
194}
195
196static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
197 struct ena_comp_ctx *comp_ctx)
198{
199 comp_ctx->occupied = false;
200 ATOMIC32_DEC(&queue->outstanding_cmds);
201}
202
203static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
204 u16 command_id, bool capture)
205{
206 if (unlikely(command_id >= queue->q_depth)) {
207 ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
208 command_id, queue->q_depth);
209 return NULL;
210 }
211
212 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
213 ena_trc_err("Completion context is occupied\n");
214 return NULL;
215 }
216
217 if (capture) {
218 ATOMIC32_INC(&queue->outstanding_cmds);
219 queue->comp_ctx[command_id].occupied = true;
220 }
221
222 return &queue->comp_ctx[command_id];
223}
224
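/* Core admin submission path: reserve the completion context for the next
 * command id, copy the descriptor into the SQ slot at the masked tail,
 * advance the tail (flipping the phase bit on wrap) and ring the SQ doorbell.
 */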
225static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
226 struct ena_admin_aq_entry *cmd,
227 size_t cmd_size_in_bytes,
228 struct ena_admin_acq_entry *comp,
229 size_t comp_size_in_bytes)
230{
231 struct ena_comp_ctx *comp_ctx;
232 u16 tail_masked, cmd_id;
233 u16 queue_size_mask;
234 u16 cnt;
235
236 queue_size_mask = admin_queue->q_depth - 1;
237
238 tail_masked = admin_queue->sq.tail & queue_size_mask;
239
240 /* In case of queue FULL */
241 cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
242 if (cnt >= admin_queue->q_depth) {
243 ena_trc_dbg("admin queue is full.\n");
244 admin_queue->stats.out_of_space++;
245 return ERR_PTR(ENA_COM_NO_SPACE);
246 }
247
248 cmd_id = admin_queue->curr_cmd_id;
249
250 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
251 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
252
253 cmd->aq_common_descriptor.command_id |= cmd_id &
254 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
255
256 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
257 if (unlikely(!comp_ctx))
258 return ERR_PTR(ENA_COM_INVAL);
259
260 comp_ctx->status = ENA_CMD_SUBMITTED;
261 comp_ctx->comp_size = (u32)comp_size_in_bytes;
262 comp_ctx->user_cqe = comp;
263 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
264
265 ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);
266
267 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
268
269 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
270 queue_size_mask;
271
272 admin_queue->sq.tail++;
273 admin_queue->stats.submitted_cmd++;
274
275 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
276 admin_queue->sq.phase = !admin_queue->sq.phase;
277
278 ENA_DB_SYNC(&admin_queue->sq.mem_handle);
279 ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
280 admin_queue->sq.db_addr);
281
282 return comp_ctx;
283}
284
285static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
286{
287 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
288 struct ena_comp_ctx *comp_ctx;
289 u16 i;
290
291 queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
292 if (unlikely(!queue->comp_ctx)) {
293 ena_trc_err("memory allocation failed");
294 return ENA_COM_NO_MEM;
295 }
296
297 for (i = 0; i < queue->q_depth; i++) {
298 comp_ctx = get_comp_ctxt(queue, i, false);
299 if (comp_ctx)
300 ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
301 }
302
303 return 0;
304}
305
306static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
307 struct ena_admin_aq_entry *cmd,
308 size_t cmd_size_in_bytes,
309 struct ena_admin_acq_entry *comp,
310 size_t comp_size_in_bytes)
311{
312 unsigned long flags = 0;
313 struct ena_comp_ctx *comp_ctx;
314
315 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
316 if (unlikely(!admin_queue->running_state)) {
317 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
318 return ERR_PTR(ENA_COM_NO_DEVICE);
319 }
320 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
321 cmd_size_in_bytes,
322 comp,
323 comp_size_in_bytes);
324 if (IS_ERR(comp_ctx))
325 admin_queue->running_state = false;
326 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
327
328 return comp_ctx;
329}
330
331static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
332 struct ena_com_create_io_ctx *ctx,
333 struct ena_com_io_sq *io_sq)
334{
335 size_t size;
336 int dev_node = 0;
337
338 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
339
340 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
341 io_sq->desc_entry_size =
342 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
343 sizeof(struct ena_eth_io_tx_desc) :
344 sizeof(struct ena_eth_io_rx_desc);
345
346 size = io_sq->desc_entry_size * io_sq->q_depth;
347 io_sq->bus = ena_dev->bus;
348
349 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
350 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
351 size,
352 io_sq->desc_addr.virt_addr,
353 io_sq->desc_addr.phys_addr,
354 io_sq->desc_addr.mem_handle,
355 ctx->numa_node,
356 dev_node);
357 if (!io_sq->desc_addr.virt_addr) {
358 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
359 size,
360 io_sq->desc_addr.virt_addr,
361 io_sq->desc_addr.phys_addr,
362 io_sq->desc_addr.mem_handle);
363 }
364
365 if (!io_sq->desc_addr.virt_addr) {
366 ena_trc_err("memory allocation failed");
367 return ENA_COM_NO_MEM;
368 }
369 }
370
371 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
372 /* Allocate bounce buffers */
373 io_sq->bounce_buf_ctrl.buffer_size = ena_dev->llq_info.desc_list_entry_size;
374 io_sq->bounce_buf_ctrl.buffers_num = ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
375 io_sq->bounce_buf_ctrl.next_to_use = 0;
376
377 size = io_sq->bounce_buf_ctrl.buffer_size * io_sq->bounce_buf_ctrl.buffers_num;
378
379 ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
380 size,
381 io_sq->bounce_buf_ctrl.base_buffer,
382 ctx->numa_node,
383 dev_node);
384 if (!io_sq->bounce_buf_ctrl.base_buffer)
385 io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);
386
387 if (!io_sq->bounce_buf_ctrl.base_buffer) {
388 ena_trc_err("bounce buffer memory allocation failed");
389 return ENA_COM_NO_MEM;
390 }
391
392 memcpy(&io_sq->llq_info, &ena_dev->llq_info, sizeof(io_sq->llq_info));
393
394 /* Initiate the first bounce buffer */
395 io_sq->llq_buf_ctrl.curr_bounce_buf =
396 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
397 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
398 0x0, io_sq->llq_info.desc_list_entry_size);
399 io_sq->llq_buf_ctrl.descs_left_in_line =
400 io_sq->llq_info.descs_num_before_header;
401
402 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
403 io_sq->entries_in_tx_burst_left =
404 io_sq->llq_info.max_entries_in_tx_burst;
405 }
406
407 io_sq->tail = 0;
408 io_sq->next_to_comp = 0;
409 io_sq->phase = 1;
410
411 return 0;
412}
413
414static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
415 struct ena_com_create_io_ctx *ctx,
416 struct ena_com_io_cq *io_cq)
417{
418 size_t size;
419 int prev_node = 0;
420
421 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
422
423 /* Use the basic completion descriptor for Rx */
424 io_cq->cdesc_entry_size_in_bytes =
425 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
426 sizeof(struct ena_eth_io_tx_cdesc) :
427 sizeof(struct ena_eth_io_rx_cdesc_base);
428
429 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
430 io_cq->bus = ena_dev->bus;
431
432 ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
433 size,
434 io_cq->cdesc_addr.virt_addr,
435 io_cq->cdesc_addr.phys_addr,
436 io_cq->cdesc_addr.mem_handle,
437 ctx->numa_node,
438 prev_node);
439 if (!io_cq->cdesc_addr.virt_addr) {
440 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
441 size,
442 io_cq->cdesc_addr.virt_addr,
443 io_cq->cdesc_addr.phys_addr,
444 io_cq->cdesc_addr.mem_handle);
445 }
446
447 if (!io_cq->cdesc_addr.virt_addr) {
448 ena_trc_err("memory allocation failed");
449 return ENA_COM_NO_MEM;
450 }
451
452 io_cq->phase = 1;
453 io_cq->head = 0;
454
455 return 0;
456}
457
458static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
459 struct ena_admin_acq_entry *cqe)
460{
461 struct ena_comp_ctx *comp_ctx;
462 u16 cmd_id;
463
464 cmd_id = cqe->acq_common_descriptor.command &
465 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
466
467 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
468 if (unlikely(!comp_ctx)) {
469 ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
470 admin_queue->running_state = false;
471 return;
472 }
473
474 comp_ctx->status = ENA_CMD_COMPLETED;
475 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
476
477 if (comp_ctx->user_cqe)
478 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
479
480 if (!admin_queue->polling)
481 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
482}
483
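/* Walk the admin completion queue from the current head, processing entries
 * whose phase bit matches the expected phase, and update the CQ/SQ heads and
 * completion statistics when done.
 */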
9f95a23c 484static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
7c673cae
FG
485{
486 struct ena_admin_acq_entry *cqe = NULL;
487 u16 comp_num = 0;
488 u16 head_masked;
489 u8 phase;
490
491 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
492 phase = admin_queue->cq.phase;
493
494 cqe = &admin_queue->cq.entries[head_masked];
495
496 /* Go over all the completions */
497 while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
498 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
499 /* Do not read the rest of the completion entry before the
500 * phase bit was validated
501 */
502 dma_rmb();
503 ena_com_handle_single_admin_completion(admin_queue, cqe);
504
505 head_masked++;
506 comp_num++;
507 if (unlikely(head_masked == admin_queue->q_depth)) {
508 head_masked = 0;
509 phase = !phase;
510 }
511
512 cqe = &admin_queue->cq.entries[head_masked];
513 }
514
515 admin_queue->cq.head += comp_num;
516 admin_queue->cq.phase = phase;
517 admin_queue->sq.head += comp_num;
518 admin_queue->stats.completed_cmd += comp_num;
519}
520
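/* Translate the completion status reported by the device into the
 * corresponding ENA_COM_* error code (0 on success).
 */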
521static int ena_com_comp_status_to_errno(u8 comp_status)
522{
523 if (unlikely(comp_status != 0))
524 ena_trc_err("admin command failed[%u]\n", comp_status);
525
526 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
527 return ENA_COM_INVAL;
528
529 switch (comp_status) {
530 case ENA_ADMIN_SUCCESS:
531 return 0;
532 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
533 return ENA_COM_NO_MEM;
534 case ENA_ADMIN_UNSUPPORTED_OPCODE:
535 return ENA_COM_UNSUPPORTED;
536 case ENA_ADMIN_BAD_OPCODE:
537 case ENA_ADMIN_MALFORMED_REQUEST:
538 case ENA_ADMIN_ILLEGAL_PARAMETER:
539 case ENA_ADMIN_UNKNOWN_ERROR:
540 return ENA_COM_INVAL;
541 }
542
543 return 0;
544}
545
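/* Polling-mode wait: repeatedly drain the admin CQ under the queue lock until
 * the command leaves the SUBMITTED state or the completion timeout expires,
 * sleeping ENA_POLL_MS between iterations.
 */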
546static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
547 struct ena_com_admin_queue *admin_queue)
548{
549 unsigned long flags = 0;
550 unsigned long timeout;
551 int ret;
552
553 timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
554
555 while (1) {
556 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
557 ena_com_handle_admin_completion(admin_queue);
558 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
559
560 if (comp_ctx->status != ENA_CMD_SUBMITTED)
561 break;
562
563 if (ENA_TIME_EXPIRE(timeout)) {
564 ena_trc_err("Wait for completion (polling) timeout\n");
565 /* ENA didn't have any completion */
566 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
567 admin_queue->stats.no_completion++;
568 admin_queue->running_state = false;
569 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
570
571 ret = ENA_COM_TIMER_EXPIRED;
572 goto err;
573 }
574
575 ENA_MSLEEP(ENA_POLL_MS);
576 }
577
578 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
579 ena_trc_err("Command was aborted\n");
580 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
581 admin_queue->stats.aborted_cmd++;
582 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
583 ret = ENA_COM_NO_DEVICE;
584 goto err;
585 }
586
587 ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
588 "Invalid comp status %d\n", comp_ctx->status);
589
590 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
591err:
592 comp_ctxt_release(admin_queue, comp_ctx);
593 return ret;
594}
595
596/**
597 * Set the LLQ configurations of the firmware
598 *
599 * The driver provides only the enabled feature values to the FW,
600 * which in turn, checks if they are supported.
601 */
602static int ena_com_set_llq(struct ena_com_dev *ena_dev)
603{
604 struct ena_com_admin_queue *admin_queue;
605 struct ena_admin_set_feat_cmd cmd;
606 struct ena_admin_set_feat_resp resp;
607 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
608 int ret;
609
610 memset(&cmd, 0x0, sizeof(cmd));
611 admin_queue = &ena_dev->admin_queue;
612
613 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
614 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
615
616 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
617 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
618 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
619 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
620
621 ret = ena_com_execute_admin_command(admin_queue,
622 (struct ena_admin_aq_entry *)&cmd,
623 sizeof(cmd),
624 (struct ena_admin_acq_entry *)&resp,
625 sizeof(resp));
626
627 if (unlikely(ret))
628 ena_trc_err("Failed to set LLQ configurations: %d\n", ret);
629
630 return ret;
631}
632
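/* Resolve the LLQ configuration: for each parameter (header location,
 * descriptor stride, entry size, descriptors before header) prefer the
 * driver default when the device supports it, otherwise fall back to a
 * supported value, then report the chosen settings to the device via
 * ena_com_set_llq().
 */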
633static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
634 struct ena_admin_feature_llq_desc *llq_features,
635 struct ena_llq_configurations *llq_default_cfg)
636{
637 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
638 u16 supported_feat;
639 int rc;
640
641 memset(llq_info, 0, sizeof(*llq_info));
642
643 supported_feat = llq_features->header_location_ctrl_supported;
644
645 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
646 llq_info->header_location_ctrl = llq_default_cfg->llq_header_location;
647 } else {
648 ena_trc_err("Invalid header location control, supported: 0x%x\n",
649 supported_feat);
650 return -EINVAL;
651 }
652
653 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
654 llq_info->inline_header = true;
655
656 supported_feat = llq_features->descriptors_stride_ctrl_supported;
657 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
658 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
659 } else {
660 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
661 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
662 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
663 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
664 } else {
665 ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
666 supported_feat);
667 return -EINVAL;
668 }
669
670 ena_trc_err("Default llq stride ctrl is not supported, performing fallback,"
671 "default: 0x%x, supported: 0x%x, used: 0x%x\n",
672 llq_default_cfg->llq_stride_ctrl,
673 supported_feat,
674 llq_info->desc_stride_ctrl);
675 }
676 } else {
677 llq_info->inline_header = false;
678 llq_info->desc_stride_ctrl = 0;
679 }
680
681 supported_feat = llq_features->entry_size_ctrl_supported;
682 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
683 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
684 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
685 } else {
686 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
687 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
688 llq_info->desc_list_entry_size = 128;
689 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
690 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
691 llq_info->desc_list_entry_size = 192;
692 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
693 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
694 llq_info->desc_list_entry_size = 256;
695 } else {
696 ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
697 return -EINVAL;
698 }
699
700 ena_trc_err("Default llq ring entry size is not supported, performing fallback,"
701 "default: 0x%x, supported: 0x%x, used: 0x%x\n",
702 llq_default_cfg->llq_ring_entry_size,
703 supported_feat,
704 llq_info->desc_list_entry_size);
705 }
706 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
707 /* The desc list entry size should be a whole multiple of 8
708 * This requirement comes from __iowrite64_copy()
709 */
710 ena_trc_err("illegal entry size %d\n",
711 llq_info->desc_list_entry_size);
712 return -EINVAL;
713 }
714
715 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
716 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
717 sizeof(struct ena_eth_io_tx_desc);
718 else
719 llq_info->descs_per_entry = 1;
720
721 supported_feat = llq_features->desc_num_before_header_supported;
722 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
723 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
724 } else {
725 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
726 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
727 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
728 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
729 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
730 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
731 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
732 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
733 } else {
734 ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
735 supported_feat);
736 return -EINVAL;
737 }
738
739 ena_trc_err("Default llq num descs before header is not supported, performing fallback,"
740 "default: 0x%x, supported: 0x%x, used: 0x%x\n",
741 llq_default_cfg->llq_num_decs_before_header,
742 supported_feat,
743 llq_info->descs_num_before_header);
744 }
745
746 llq_info->max_entries_in_tx_burst =
747 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
748
749 rc = ena_com_set_llq(ena_dev);
750 if (rc)
751 ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
752
753 return 0;
754}
755
756
757
758static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
759 struct ena_com_admin_queue *admin_queue)
760{
761 unsigned long flags = 0;
762 int ret;
763
764 ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
765 admin_queue->completion_timeout);
766
767 /* In case the command wasn't completed find out the root cause.
768 * There might be 2 kinds of errors
769 * 1) No completion (timeout reached)
770 * 2) There is completion but the device didn't get any msi-x interrupt.
771 */
772 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
773 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
774 ena_com_handle_admin_completion(admin_queue);
775 admin_queue->stats.no_completion++;
776 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
777
778 if (comp_ctx->status == ENA_CMD_COMPLETED)
779 ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
780 comp_ctx->cmd_opcode);
781 else
782 ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
783 comp_ctx->cmd_opcode, comp_ctx->status);
784
785 admin_queue->running_state = false;
786 ret = ENA_COM_TIMER_EXPIRED;
787 goto err;
788 }
789
790 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
791err:
792 comp_ctxt_release(admin_queue, comp_ctx);
793 return ret;
794}
795
796/* This method reads a hardware device register by posting writes
797 * and waiting for the response
798 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
799 */
800static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
801{
802 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
803 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
804 mmio_read->read_resp;
805 u32 mmio_read_reg, ret, i;
806 unsigned long flags = 0;
807 u32 timeout = mmio_read->reg_read_to;
808
809 ENA_MIGHT_SLEEP();
810
811 if (timeout == 0)
812 timeout = ENA_REG_READ_TIMEOUT;
813
814 /* If readless is disabled, perform regular read */
815 if (!mmio_read->readless_supported)
816 return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
817
818 ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
819 mmio_read->seq_num++;
820
821 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
822 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
823 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
824 mmio_read_reg |= mmio_read->seq_num &
825 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
826
827 ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
828 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
829
830 for (i = 0; i < timeout; i++) {
831 if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
832 break;
833
834 ENA_UDELAY(1);
835 }
836
837 if (unlikely(i == timeout)) {
838 ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
839 mmio_read->seq_num,
840 offset,
841 read_resp->req_id,
842 read_resp->reg_off);
843 ret = ENA_MMIO_READ_TIMEOUT;
844 goto err;
845 }
846
847 if (read_resp->reg_off != offset) {
9f95a23c 848 ena_trc_err("Read failure: wrong offset provided");
7c673cae
FG
849 ret = ENA_MMIO_READ_TIMEOUT;
850 } else {
851 ret = read_resp->reg_val;
852 }
853err:
854 ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);
855
856 return ret;
857}
858
859/* There are two types to wait for completion.
860 * Polling mode - wait until the completion is available.
861 * Async mode - wait on wait queue until the completion is ready
862 * (or the timeout expired).
863 * It is expected that the IRQ called ena_com_handle_admin_completion
864 * to mark the completions.
865 */
866static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
867 struct ena_com_admin_queue *admin_queue)
868{
869 if (admin_queue->polling)
870 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
871 admin_queue);
872
873 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
874 admin_queue);
875}
876
877static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
878 struct ena_com_io_sq *io_sq)
879{
880 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
881 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
882 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
883 u8 direction;
884 int ret;
885
886 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
887
888 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
889 direction = ENA_ADMIN_SQ_DIRECTION_TX;
890 else
891 direction = ENA_ADMIN_SQ_DIRECTION_RX;
892
893 destroy_cmd.sq.sq_identity |= (direction <<
894 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
895 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
896
897 destroy_cmd.sq.sq_idx = io_sq->idx;
898 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
899
900 ret = ena_com_execute_admin_command(admin_queue,
901 (struct ena_admin_aq_entry *)&destroy_cmd,
902 sizeof(destroy_cmd),
903 (struct ena_admin_acq_entry *)&destroy_resp,
904 sizeof(destroy_resp));
905
906 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
907 ena_trc_err("failed to destroy io sq error: %d\n", ret);
908
909 return ret;
910}
911
912static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
913 struct ena_com_io_sq *io_sq,
914 struct ena_com_io_cq *io_cq)
915{
916 size_t size;
917
918 if (io_cq->cdesc_addr.virt_addr) {
919 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
920
921 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
922 size,
923 io_cq->cdesc_addr.virt_addr,
924 io_cq->cdesc_addr.phys_addr,
925 io_cq->cdesc_addr.mem_handle);
926
927 io_cq->cdesc_addr.virt_addr = NULL;
928 }
929
930 if (io_sq->desc_addr.virt_addr) {
931 size = io_sq->desc_entry_size * io_sq->q_depth;
932
933 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
934 size,
935 io_sq->desc_addr.virt_addr,
936 io_sq->desc_addr.phys_addr,
937 io_sq->desc_addr.mem_handle);
938
939 io_sq->desc_addr.virt_addr = NULL;
940 }
941
942 if (io_sq->bounce_buf_ctrl.base_buffer) {
943 size = io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
944 ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
945 io_sq->bounce_buf_ctrl.base_buffer = NULL;
946 }
947}
948
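/* Poll the device status register until the reset-in-progress bit reaches
 * exp_state or the timeout (given in units of 100 ms) expires.
 */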
949static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
950 u16 exp_state)
951{
952 u32 val, i;
953
954 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
955 timeout = (timeout * 100) / ENA_POLL_MS;
956
957 for (i = 0; i < timeout; i++) {
958 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
959
960 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
961 ena_trc_err("Reg read timeout occurred\n");
962 return ENA_COM_TIMER_EXPIRED;
963 }
964
965 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
966 exp_state)
967 return 0;
968
969 ENA_MSLEEP(ENA_POLL_MS);
970 }
971
972 return ENA_COM_TIMER_EXPIRED;
973}
974
975static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
976 enum ena_admin_aq_feature_id feature_id)
977{
978 u32 feature_mask = 1 << feature_id;
979
980 /* Device attributes are always supported */
981 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
982 !(ena_dev->supported_features & feature_mask))
983 return false;
984
985 return true;
986}
987
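/* Build and execute an ENA_ADMIN_GET_FEATURE admin command for the given
 * feature id, optionally pointing the control buffer at a caller-supplied
 * DMA region, and return the device response in *get_resp.
 */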
988static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
989 struct ena_admin_get_feat_resp *get_resp,
990 enum ena_admin_aq_feature_id feature_id,
991 dma_addr_t control_buf_dma_addr,
992 u32 control_buff_size,
993 u8 feature_ver)
994{
995 struct ena_com_admin_queue *admin_queue;
996 struct ena_admin_get_feat_cmd get_cmd;
997 int ret;
998
999 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
1000 ena_trc_dbg("Feature %d isn't supported\n", feature_id);
1001 return ENA_COM_UNSUPPORTED;
1002 }
1003
1004 memset(&get_cmd, 0x0, sizeof(get_cmd));
1005 admin_queue = &ena_dev->admin_queue;
1006
1007 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1008
1009 if (control_buff_size)
1010 get_cmd.aq_common_descriptor.flags =
1011 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1012 else
1013 get_cmd.aq_common_descriptor.flags = 0;
1014
1015 ret = ena_com_mem_addr_set(ena_dev,
1016 &get_cmd.control_buffer.address,
1017 control_buf_dma_addr);
1018 if (unlikely(ret)) {
1019 ena_trc_err("memory address set failed\n");
1020 return ret;
1021 }
1022
1023 get_cmd.control_buffer.length = control_buff_size;
1024 get_cmd.feat_common.feature_version = feature_ver;
1025 get_cmd.feat_common.feature_id = feature_id;
1026
1027 ret = ena_com_execute_admin_command(admin_queue,
1028 (struct ena_admin_aq_entry *)
1029 &get_cmd,
1030 sizeof(get_cmd),
1031 (struct ena_admin_acq_entry *)
1032 get_resp,
1033 sizeof(*get_resp));
1034
1035 if (unlikely(ret))
1036 ena_trc_err("Failed to submit get_feature command %d error: %d\n",
1037 feature_id, ret);
1038
1039 return ret;
1040}
1041
1042static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1043 struct ena_admin_get_feat_resp *get_resp,
1044 enum ena_admin_aq_feature_id feature_id,
1045 u8 feature_ver)
1046{
1047 return ena_com_get_feature_ex(ena_dev,
1048 get_resp,
1049 feature_id,
1050 0,
1051 0,
1052 feature_ver);
1053}
1054
1055static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1056{
1057 struct ena_rss *rss = &ena_dev->rss;
1058
1059 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1060 sizeof(*rss->hash_key),
1061 rss->hash_key,
1062 rss->hash_key_dma_addr,
1063 rss->hash_key_mem_handle);
1064
1065 if (unlikely(!rss->hash_key))
1066 return ENA_COM_NO_MEM;
1067
1068 return 0;
1069}
1070
1071static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1072{
1073 struct ena_rss *rss = &ena_dev->rss;
1074
1075 if (rss->hash_key)
1076 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1077 sizeof(*rss->hash_key),
1078 rss->hash_key,
1079 rss->hash_key_dma_addr,
1080 rss->hash_key_mem_handle);
1081 rss->hash_key = NULL;
1082}
1083
1084static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1085{
1086 struct ena_rss *rss = &ena_dev->rss;
1087
1088 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1089 sizeof(*rss->hash_ctrl),
1090 rss->hash_ctrl,
1091 rss->hash_ctrl_dma_addr,
1092 rss->hash_ctrl_mem_handle);
1093
1094 if (unlikely(!rss->hash_ctrl))
1095 return ENA_COM_NO_MEM;
1096
1097 return 0;
1098}
1099
1100static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1101{
1102 struct ena_rss *rss = &ena_dev->rss;
1103
1104 if (rss->hash_ctrl)
1105 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1106 sizeof(*rss->hash_ctrl),
1107 rss->hash_ctrl,
1108 rss->hash_ctrl_dma_addr,
1109 rss->hash_ctrl_mem_handle);
1110 rss->hash_ctrl = NULL;
1111}
1112
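/* Check the requested indirection table size against the device limits and
 * allocate both the DMA-able table handed to the device and the host-side
 * shadow copy used for queue-id translation.
 */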
1113static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1114 u16 log_size)
1115{
1116 struct ena_rss *rss = &ena_dev->rss;
1117 struct ena_admin_get_feat_resp get_resp;
1118 size_t tbl_size;
1119 int ret;
1120
1121 ret = ena_com_get_feature(ena_dev, &get_resp,
1122 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1123 if (unlikely(ret))
1124 return ret;
1125
1126 if ((get_resp.u.ind_table.min_size > log_size) ||
1127 (get_resp.u.ind_table.max_size < log_size)) {
1128 ena_trc_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1129 1 << log_size,
1130 1 << get_resp.u.ind_table.min_size,
1131 1 << get_resp.u.ind_table.max_size);
1132 return ENA_COM_INVAL;
1133 }
1134
1135 tbl_size = (1ULL << log_size) *
1136 sizeof(struct ena_admin_rss_ind_table_entry);
1137
1138 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1139 tbl_size,
1140 rss->rss_ind_tbl,
1141 rss->rss_ind_tbl_dma_addr,
1142 rss->rss_ind_tbl_mem_handle);
1143 if (unlikely(!rss->rss_ind_tbl))
1144 goto mem_err1;
1145
1146 tbl_size = (1ULL << log_size) * sizeof(u16);
1147 rss->host_rss_ind_tbl =
1148 ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
1149 if (unlikely(!rss->host_rss_ind_tbl))
1150 goto mem_err2;
1151
1152 rss->tbl_log_size = log_size;
1153
1154 return 0;
1155
1156mem_err2:
1157 tbl_size = (1ULL << log_size) *
1158 sizeof(struct ena_admin_rss_ind_table_entry);
1159
1160 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1161 tbl_size,
1162 rss->rss_ind_tbl,
1163 rss->rss_ind_tbl_dma_addr,
1164 rss->rss_ind_tbl_mem_handle);
1165 rss->rss_ind_tbl = NULL;
1166mem_err1:
1167 rss->tbl_log_size = 0;
1168 return ENA_COM_NO_MEM;
1169}
1170
1171static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1172{
1173 struct ena_rss *rss = &ena_dev->rss;
1174 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1175 sizeof(struct ena_admin_rss_ind_table_entry);
1176
1177 if (rss->rss_ind_tbl)
1178 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1179 tbl_size,
1180 rss->rss_ind_tbl,
1181 rss->rss_ind_tbl_dma_addr,
1182 rss->rss_ind_tbl_mem_handle);
1183 rss->rss_ind_tbl = NULL;
1184
1185 if (rss->host_rss_ind_tbl)
1186 ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
1187 rss->host_rss_ind_tbl = NULL;
1188}
1189
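/* Issue an ENA_ADMIN_CREATE_SQ admin command for the IO SQ; on success record
 * the SQ index and doorbell address, and for device-placement (LLQ) queues
 * also the header and descriptor windows inside the device memory BAR.
 */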
1190static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1191 struct ena_com_io_sq *io_sq, u16 cq_idx)
1192{
1193 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1194 struct ena_admin_aq_create_sq_cmd create_cmd;
1195 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1196 u8 direction;
1197 int ret;
1198
1199 memset(&create_cmd, 0x0, sizeof(create_cmd));
1200
1201 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1202
1203 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1204 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1205 else
1206 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1207
1208 create_cmd.sq_identity |= (direction <<
1209 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1210 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1211
1212 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1213 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1214
1215 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1216 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1217 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1218
1219 create_cmd.sq_caps_3 |=
1220 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1221
1222 create_cmd.cq_idx = cq_idx;
1223 create_cmd.sq_depth = io_sq->q_depth;
1224
1225 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1226 ret = ena_com_mem_addr_set(ena_dev,
1227 &create_cmd.sq_ba,
1228 io_sq->desc_addr.phys_addr);
1229 if (unlikely(ret)) {
1230 ena_trc_err("memory address set failed\n");
1231 return ret;
1232 }
1233 }
1234
1235 ret = ena_com_execute_admin_command(admin_queue,
1236 (struct ena_admin_aq_entry *)&create_cmd,
1237 sizeof(create_cmd),
1238 (struct ena_admin_acq_entry *)&cmd_completion,
1239 sizeof(cmd_completion));
1240 if (unlikely(ret)) {
1241 ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
1242 return ret;
1243 }
1244
1245 io_sq->idx = cmd_completion.sq_idx;
1246
1247 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1248 (uintptr_t)cmd_completion.sq_doorbell_offset);
1249
1250 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1251 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1252 + cmd_completion.llq_headers_offset);
1253
1254 io_sq->desc_addr.pbuf_dev_addr =
1255 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1256 cmd_completion.llq_descriptors_offset);
1257 }
1258
1259 ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1260
1261 return ret;
1262}
1263
1264static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1265{
1266 struct ena_rss *rss = &ena_dev->rss;
1267 struct ena_com_io_sq *io_sq;
1268 u16 qid;
1269 int i;
1270
1271 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1272 qid = rss->host_rss_ind_tbl[i];
1273 if (qid >= ENA_TOTAL_NUM_QUEUES)
1274 return ENA_COM_INVAL;
1275
1276 io_sq = &ena_dev->io_sq_queues[qid];
1277
1278 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1279 return ENA_COM_INVAL;
1280
1281 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1282 }
1283
1284 return 0;
1285}
1286
1287static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1288{
1289 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1290 struct ena_rss *rss = &ena_dev->rss;
1291 u8 idx;
1292 u16 i;
1293
1294 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1295 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1296
1297 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1298 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1299 return ENA_COM_INVAL;
1300 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1301
1302 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1303 return ENA_COM_INVAL;
1304
1305 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1306 }
1307
1308 return 0;
1309}
1310
1311static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1312{
1313 size_t size;
1314
1315 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1316
1317 ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
1318 if (!ena_dev->intr_moder_tbl)
1319 return ENA_COM_NO_MEM;
1320
1321 ena_com_config_default_interrupt_moderation_table(ena_dev);
1322
1323 return 0;
1324}
1325
1326static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1327 u16 intr_delay_resolution)
1328{
1329 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1330 unsigned int i;
1331
1332 if (!intr_delay_resolution) {
1333 ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1334 intr_delay_resolution = 1;
1335 }
1336 ena_dev->intr_delay_resolution = intr_delay_resolution;
1337
1338 /* update Rx */
1339 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1340 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1341
1342 /* update Tx */
1343 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1344}
1345
1346/*****************************************************************************/
1347/******************************* API ******************************/
1348/*****************************************************************************/
1349
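/* Submit an admin command and block until its completion has been processed,
 * using either polling or interrupt (wait-event) mode depending on the
 * admin queue configuration.
 */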
1350int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1351 struct ena_admin_aq_entry *cmd,
1352 size_t cmd_size,
1353 struct ena_admin_acq_entry *comp,
1354 size_t comp_size)
1355{
1356 struct ena_comp_ctx *comp_ctx;
1357 int ret;
1358
1359 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1360 comp, comp_size);
1361 if (IS_ERR(comp_ctx)) {
1362 if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
1363 ena_trc_dbg("Failed to submit command [%ld]\n",
1364 PTR_ERR(comp_ctx));
1365 else
1366 ena_trc_err("Failed to submit command [%ld]\n",
1367 PTR_ERR(comp_ctx));
1368
1369 return PTR_ERR(comp_ctx);
1370 }
1371
1372 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1373 if (unlikely(ret)) {
1374 if (admin_queue->running_state)
1375 ena_trc_err("Failed to process command. ret = %d\n",
1376 ret);
1377 else
1378 ena_trc_dbg("Failed to process command. ret = %d\n",
1379 ret);
1380 }
1381 return ret;
1382}
1383
1384int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1385 struct ena_com_io_cq *io_cq)
1386{
1387 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1388 struct ena_admin_aq_create_cq_cmd create_cmd;
1389 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1390 int ret;
1391
1392 memset(&create_cmd, 0x0, sizeof(create_cmd));
1393
1394 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1395
1396 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1397 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1398 create_cmd.cq_caps_1 |=
1399 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1400
1401 create_cmd.msix_vector = io_cq->msix_vector;
1402 create_cmd.cq_depth = io_cq->q_depth;
1403
1404 ret = ena_com_mem_addr_set(ena_dev,
1405 &create_cmd.cq_ba,
1406 io_cq->cdesc_addr.phys_addr);
1407 if (unlikely(ret)) {
1408 ena_trc_err("memory address set failed\n");
1409 return ret;
1410 }
1411
1412 ret = ena_com_execute_admin_command(admin_queue,
1413 (struct ena_admin_aq_entry *)&create_cmd,
1414 sizeof(create_cmd),
1415 (struct ena_admin_acq_entry *)&cmd_completion,
1416 sizeof(cmd_completion));
1417 if (unlikely(ret)) {
1418 ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
1419 return ret;
1420 }
1421
1422 io_cq->idx = cmd_completion.cq_idx;
1423
1424 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1425 cmd_completion.cq_interrupt_unmask_register_offset);
1426
1427 if (cmd_completion.cq_head_db_register_offset)
1428 io_cq->cq_head_db_reg =
1429 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1430 cmd_completion.cq_head_db_register_offset);
1431
1432 if (cmd_completion.numa_node_register_offset)
1433 io_cq->numa_node_cfg_reg =
1434 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1435 cmd_completion.numa_node_register_offset);
1436
1437 ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1438
1439 return ret;
1440}
1441
1442int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1443 struct ena_com_io_sq **io_sq,
1444 struct ena_com_io_cq **io_cq)
1445{
1446 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1447 ena_trc_err("Invalid queue number %d but the max is %d\n",
1448 qid, ENA_TOTAL_NUM_QUEUES);
1449 return ENA_COM_INVAL;
1450 }
1451
1452 *io_sq = &ena_dev->io_sq_queues[qid];
1453 *io_cq = &ena_dev->io_cq_queues[qid];
1454
1455 return 0;
1456}
1457
1458void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1459{
1460 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1461 struct ena_comp_ctx *comp_ctx;
1462 u16 i;
1463
1464 if (!admin_queue->comp_ctx)
1465 return;
1466
1467 for (i = 0; i < admin_queue->q_depth; i++) {
1468 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1469 if (unlikely(!comp_ctx))
1470 break;
1471
1472 comp_ctx->status = ENA_CMD_ABORTED;
1473
1474 ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
1475 }
1476}
1477
1478void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1479{
1480 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1481 unsigned long flags = 0;
1482
1483 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1484 while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
1485 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1486 ENA_MSLEEP(ENA_POLL_MS);
1487 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1488 }
1489 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1490}
1491
1492int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1493 struct ena_com_io_cq *io_cq)
1494{
1495 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1496 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1497 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1498 int ret;
1499
1500 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1501
1502 destroy_cmd.cq_idx = io_cq->idx;
1503 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1504
1505 ret = ena_com_execute_admin_command(admin_queue,
1506 (struct ena_admin_aq_entry *)&destroy_cmd,
1507 sizeof(destroy_cmd),
1508 (struct ena_admin_acq_entry *)&destroy_resp,
1509 sizeof(destroy_resp));
1510
1511 if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
1512 ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
1513
1514 return ret;
1515}
1516
1517bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1518{
1519 return ena_dev->admin_queue.running_state;
1520}
1521
1522void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1523{
1524 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1525 unsigned long flags = 0;
1526
1527 ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
1528 ena_dev->admin_queue.running_state = state;
1529 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
1530}
1531
1532void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1533{
1534 u16 depth = ena_dev->aenq.q_depth;
1535
1536 ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1537
1538 /* Init head_db to mark that all entries in the queue
1539 * are initially available
1540 */
1541 ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1542}
1543
1544int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1545{
1546 struct ena_com_admin_queue *admin_queue;
1547 struct ena_admin_set_feat_cmd cmd;
1548 struct ena_admin_set_feat_resp resp;
1549 struct ena_admin_get_feat_resp get_resp;
1550 int ret;
1551
1552 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1553 if (ret) {
1554 ena_trc_info("Can't get aenq configuration\n");
1555 return ret;
1556 }
1557
1558 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1559 ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1560 get_resp.u.aenq.supported_groups,
1561 groups_flag);
1562 return ENA_COM_UNSUPPORTED;
1563 }
1564
1565 memset(&cmd, 0x0, sizeof(cmd));
1566 admin_queue = &ena_dev->admin_queue;
1567
1568 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1569 cmd.aq_common_descriptor.flags = 0;
1570 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1571 cmd.u.aenq.enabled_groups = groups_flag;
1572
1573 ret = ena_com_execute_admin_command(admin_queue,
1574 (struct ena_admin_aq_entry *)&cmd,
1575 sizeof(cmd),
1576 (struct ena_admin_acq_entry *)&resp,
1577 sizeof(resp));
1578
1579 if (unlikely(ret))
1580 ena_trc_err("Failed to config AENQ ret: %d\n", ret);
1581
1582 return ret;
1583}
1584
1585int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1586{
1587 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1588 int width;
1589
1590 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1591 ena_trc_err("Reg read timeout occurred\n");
1592 return ENA_COM_TIMER_EXPIRED;
1593 }
1594
1595 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1596 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1597
1598 ena_trc_dbg("ENA dma width: %d\n", width);
1599
1600 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1601 ena_trc_err("DMA width illegal value: %d\n", width);
1602 return ENA_COM_INVAL;
1603 }
1604
1605 ena_dev->dma_addr_bits = width;
1606
1607 return width;
1608}
1609
1610int ena_com_validate_version(struct ena_com_dev *ena_dev)
1611{
1612 u32 ver;
1613 u32 ctrl_ver;
1614 u32 ctrl_ver_masked;
1615
1616 /* Make sure the ENA version and the controller version are at least
1617 * as the driver expects
1618 */
1619 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1620 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1621 ENA_REGS_CONTROLLER_VERSION_OFF);
1622
1623 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1624 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1625 ena_trc_err("Reg read timeout occurred\n");
1626 return ENA_COM_TIMER_EXPIRED;
1627 }
1628
1629 ena_trc_info("ena device version: %d.%d\n",
1630 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1631 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1632 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1633
1634 ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
1635 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
1636 >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1637 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
1638 >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1639 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1640 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1641 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1642
1643 ctrl_ver_masked =
1644 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1645 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1646 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1647
1648 /* Validate the ctrl version without the implementation ID */
1649 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1650 ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1651 return -1;
1652 }
1653
1654 return 0;
1655}
1656
1657void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1658{
1659 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1660 struct ena_com_admin_cq *cq = &admin_queue->cq;
1661 struct ena_com_admin_sq *sq = &admin_queue->sq;
1662 struct ena_com_aenq *aenq = &ena_dev->aenq;
1663 u16 size;
1664
1665 ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
1666 if (admin_queue->comp_ctx)
1667 ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
1668 admin_queue->comp_ctx = NULL;
1669 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1670 if (sq->entries)
1671 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
1672 sq->dma_addr, sq->mem_handle);
1673 sq->entries = NULL;
1674
1675 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1676 if (cq->entries)
1677 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
1678 cq->dma_addr, cq->mem_handle);
1679 cq->entries = NULL;
1680
1681 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1682 if (ena_dev->aenq.entries)
1683 ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
1684 aenq->dma_addr, aenq->mem_handle);
1685 aenq->entries = NULL;
1686 ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
1687}
1688
1689void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1690{
1691 u32 mask_value = 0;
1692
1693 if (polling)
1694 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1695
1696 ENA_REG_WRITE32(ena_dev->bus, mask_value,
1697 ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1698 ena_dev->admin_queue.polling = polling;
1699}
1700
1701int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1702{
1703 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1704
1705 ENA_SPINLOCK_INIT(mmio_read->lock);
1706 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1707 sizeof(*mmio_read->read_resp),
1708 mmio_read->read_resp,
1709 mmio_read->read_resp_dma_addr,
1710 mmio_read->read_resp_mem_handle);
1711 if (unlikely(!mmio_read->read_resp))
1712 goto err;
1713
1714 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1715
1716 mmio_read->read_resp->req_id = 0x0;
1717 mmio_read->seq_num = 0x0;
1718 mmio_read->readless_supported = true;
1719
1720 return 0;
1721
1722err:
1723 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1724 return ENA_COM_NO_MEM;
1725}
1726
9f95a23c 1727void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
7c673cae
FG
1728{
1729 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1730
1731 mmio_read->readless_supported = readless_supported;
1732}
1733
1734void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1735{
1736 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1737
1738 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1739 ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1740
1741 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1742 sizeof(*mmio_read->read_resp),
1743 mmio_read->read_resp,
1744 mmio_read->read_resp_dma_addr,
1745 mmio_read->read_resp_mem_handle);
1746
1747 mmio_read->read_resp = NULL;
1748 ENA_SPINLOCK_DESTROY(mmio_read->lock);
1749}
1750
1751void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1752{
1753 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1754 u32 addr_low, addr_high;
1755
1756 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1757 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1758
1759 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1760 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1761}
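/* Readless register reads: rather than reading the BAR directly, the driver
 * posts a read request and the device DMA-writes the answer into read_resp,
 * whose bus address is programmed above into ENA_REGS_MMIO_RESP_LO/HI.
 * ena_com_dev_reset() calls this helper again after a reset, since the
 * response address has to be announced to the device anew (see below).
 */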
1762
1763int ena_com_admin_init(struct ena_com_dev *ena_dev,
1764 struct ena_aenq_handlers *aenq_handlers)
1765{
1766 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1767 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1768 int ret;
1769
1770 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1771
1772 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1773 ena_trc_err("Reg read timeout occurred\n");
1774 return ENA_COM_TIMER_EXPIRED;
1775 }
1776
1777 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1778 ena_trc_err("Device isn't ready, abort com init\n");
1779 return ENA_COM_NO_DEVICE;
1780 }
1781
1782 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1783
1784 admin_queue->bus = ena_dev->bus;
1785 admin_queue->q_dmadev = ena_dev->dmadev;
1786 admin_queue->polling = false;
1787 admin_queue->curr_cmd_id = 0;
1788
1789 ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
1790
1791 ENA_SPINLOCK_INIT(admin_queue->q_lock);
1792
1793 ret = ena_com_init_comp_ctxt(admin_queue);
1794 if (ret)
1795 goto error;
1796
1797 ret = ena_com_admin_init_sq(admin_queue);
1798 if (ret)
1799 goto error;
1800
1801 ret = ena_com_admin_init_cq(admin_queue);
1802 if (ret)
1803 goto error;
1804
1805 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1806 ENA_REGS_AQ_DB_OFF);
1807
1808 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1809 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1810
1811 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1812 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1813
1814 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1815 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1816
1817 ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1818 ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1819
1820 aq_caps = 0;
1821 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1822 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1823 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1824 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1825
1826 acq_caps = 0;
1827 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1828 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1829 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1830 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1831
1832 ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1833 ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1834 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1835 if (ret)
1836 goto error;
1837
1838 admin_queue->running_state = true;
1839
1840 return 0;
1841error:
1842 ena_com_admin_destroy(ena_dev);
1843
1844 return ret;
1845}
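/* A minimal bring-up sketch (illustrative only, error handling omitted;
 * assumes reg_bar/dmadev/bus were already set up by the platform code):
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers);
 *	ena_com_set_admin_polling_mode(ena_dev, false);
 */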
1846
1847int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1848 struct ena_com_create_io_ctx *ctx)
1849{
1850 struct ena_com_io_sq *io_sq;
1851 struct ena_com_io_cq *io_cq;
1852 int ret;
1853
1854 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1855 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1856 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1857 return ENA_COM_INVAL;
1858 }
1859
1860 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1861 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1862
1863 memset(io_sq, 0x0, sizeof(*io_sq));
1864 memset(io_cq, 0x0, sizeof(*io_cq));
1865
1866 /* Init CQ */
1867 io_cq->q_depth = ctx->queue_size;
1868 io_cq->direction = ctx->direction;
1869 io_cq->qid = ctx->qid;
1870
1871 io_cq->msix_vector = ctx->msix_vector;
1872
1873 io_sq->q_depth = ctx->queue_size;
1874 io_sq->direction = ctx->direction;
1875 io_sq->qid = ctx->qid;
1876
1877 io_sq->mem_queue_type = ctx->mem_queue_type;
1878
1879 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1880 /* header length is limited to 8 bits */
1881 io_sq->tx_max_header_size =
1882 ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);
1883
1884 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1885 if (ret)
1886 goto error;
1887 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1888 if (ret)
1889 goto error;
1890
1891 ret = ena_com_create_io_cq(ena_dev, io_cq);
1892 if (ret)
1893 goto error;
1894
1895 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1896 if (ret)
1897 goto destroy_io_cq;
1898
1899 return 0;
1900
1901destroy_io_cq:
1902 ena_com_destroy_io_cq(ena_dev, io_cq);
1903error:
1904 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1905 return ret;
1906}
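/* Illustrative context for creating one TX queue (example values only):
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid = 0,
 *		.queue_size = 1024,
 *		.msix_vector = 0,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	...
 *	ena_com_destroy_io_queue(ena_dev, ctx.qid);
 */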
1907
1908void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1909{
1910 struct ena_com_io_sq *io_sq;
1911 struct ena_com_io_cq *io_cq;
1912
1913 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1914 ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
1915 qid, ENA_TOTAL_NUM_QUEUES);
1916 return;
1917 }
1918
1919 io_sq = &ena_dev->io_sq_queues[qid];
1920 io_cq = &ena_dev->io_cq_queues[qid];
1921
1922 ena_com_destroy_io_sq(ena_dev, io_sq);
1923 ena_com_destroy_io_cq(ena_dev, io_cq);
1924
1925 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1926}
1927
1928int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1929 struct ena_admin_get_feat_resp *resp)
1930{
1931 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1932}
1933
1934int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
1935{
1936 struct ena_admin_get_feat_resp resp;
1937 struct ena_extra_properties_strings *extra_properties_strings =
1938 &ena_dev->extra_properties_strings;
1939 u32 rc;
1940 extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
1941 ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
1942
1943 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
1944 extra_properties_strings->size,
1945 extra_properties_strings->virt_addr,
1946 extra_properties_strings->dma_addr,
1947 extra_properties_strings->dma_handle);
1948 if (unlikely(!extra_properties_strings->virt_addr)) {
1949 ena_trc_err("Failed to allocate extra properties strings\n");
1950 return 0;
1951 }
1952
1953 rc = ena_com_get_feature_ex(ena_dev, &resp,
1954 ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
1955 extra_properties_strings->dma_addr,
1956 extra_properties_strings->size, 0);
1957 if (rc) {
1958 ena_trc_dbg("Failed to get extra properties strings\n");
1959 goto err;
1960 }
1961
1962 return resp.u.extra_properties_strings.count;
1963err:
1964 ena_com_delete_extra_properties_strings(ena_dev);
1965 return 0;
1966}
1967
1968void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
1969{
1970 struct ena_extra_properties_strings *extra_properties_strings =
1971 &ena_dev->extra_properties_strings;
1972
1973 if (extra_properties_strings->virt_addr) {
1974 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
1975 extra_properties_strings->size,
1976 extra_properties_strings->virt_addr,
1977 extra_properties_strings->dma_addr,
1978 extra_properties_strings->dma_handle);
1979 extra_properties_strings->virt_addr = NULL;
1980 }
1981}
1982
1983int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
1984 struct ena_admin_get_feat_resp *resp)
1985{
1986 return ena_com_get_feature(ena_dev, resp,
1987 ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
1988}
1989
1990int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1991 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1992{
1993 struct ena_admin_get_feat_resp get_resp;
1994 int rc;
1995
1996 rc = ena_com_get_feature(ena_dev, &get_resp,
1997 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1998 if (rc)
1999 return rc;
2000
2001 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
2002 sizeof(get_resp.u.dev_attr));
2003 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
2004
2005 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
2006 rc = ena_com_get_feature(ena_dev, &get_resp,
2007 ENA_ADMIN_MAX_QUEUES_EXT,
2008 ENA_FEATURE_MAX_QUEUE_EXT_VER);
2009 if (rc)
2010 return rc;
2011
2012 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
2013 return -EINVAL;
2014
2015 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
2016 sizeof(get_resp.u.max_queue_ext));
2017 ena_dev->tx_max_header_size =
2018 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
2019 } else {
2020 rc = ena_com_get_feature(ena_dev, &get_resp,
2021 ENA_ADMIN_MAX_QUEUES_NUM, 0);
2022 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
2023 sizeof(get_resp.u.max_queue));
2024 ena_dev->tx_max_header_size =
2025 get_resp.u.max_queue.max_header_size;
2026
2027 if (rc)
2028 return rc;
2029 }
2030
2031 rc = ena_com_get_feature(ena_dev, &get_resp,
2032 ENA_ADMIN_AENQ_CONFIG, 0);
2033 if (rc)
2034 return rc;
2035
2036 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2037 sizeof(get_resp.u.aenq));
2038
2039 rc = ena_com_get_feature(ena_dev, &get_resp,
2040 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2041 if (rc)
2042 return rc;
2043
2044 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2045 sizeof(get_resp.u.offload));
2046
2047 /* The driver hints command isn't a mandatory admin command, so if the
2048  * command isn't supported, set the driver hints to 0
2049  */
2050 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2051
2052 if (!rc)
2053 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2054 sizeof(get_resp.u.hw_hints));
2055 else if (rc == ENA_COM_UNSUPPORTED)
2056 memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
2057 else
2058 return rc;
2059
2060 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2061 if (!rc)
2062 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2063 sizeof(get_resp.u.llq));
2064 else if (rc == ENA_COM_UNSUPPORTED)
2065 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2066 else
2067 return rc;
2068
2069 rc = ena_com_get_feature(ena_dev, &get_resp,
2070 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
2071 if (!rc)
2072 memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
2073 sizeof(get_resp.u.ind_table));
2074 else if (rc == ENA_COM_UNSUPPORTED)
2075 memset(&get_feat_ctx->ind_table, 0x0,
2076 sizeof(get_feat_ctx->ind_table));
2077 else
2078 return rc;
2079
2080 return 0;
2081}
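/* Devices exposing ENA_ADMIN_MAX_QUEUES_EXT report their queue limits through
 * the versioned max_queue_ext structure; older devices fall back to the legacy
 * ENA_ADMIN_MAX_QUEUES_NUM layout. The optional features queried afterwards
 * (HW hints, LLQ, RSS indirection table) are best effort: ENA_COM_UNSUPPORTED
 * only zeroes the corresponding field in get_feat_ctx instead of failing.
 */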
2082
2083void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2084{
2085 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2086}
2087
2088/* ena_handle_specific_aenq_event:
2089 * return the handler that is relevant to the specific event group
2090 */
2091static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
2092 u16 group)
2093{
2094 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
2095
2096 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2097 return aenq_handlers->handlers[group];
2098
2099 return aenq_handlers->unimplemented_handler;
2100}
2101
2102/* ena_aenq_intr_handler:
2103 * handles the aenq incoming events.
2104 * pop events from the queue and apply the specific handler
2105 */
2106void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2107{
2108 struct ena_admin_aenq_entry *aenq_e;
2109 struct ena_admin_aenq_common_desc *aenq_common;
2110 struct ena_com_aenq *aenq = &dev->aenq;
2111 unsigned long long timestamp;
2112 ena_aenq_handler handler_cb;
2113 u16 masked_head, processed = 0;
2114 u8 phase;
2115
2116 masked_head = aenq->head & (aenq->q_depth - 1);
2117 phase = aenq->phase;
2118 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
2119 aenq_common = &aenq_e->aenq_common_desc;
2120
2121 /* Go over all the events */
2122 while ((READ_ONCE8(aenq_common->flags) &
2123 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2124 /* Make sure the phase bit (ownership) is as expected before
2125 * reading the rest of the descriptor.
2126 */
2127 dma_rmb();
2128
2129 timestamp = (unsigned long long)aenq_common->timestamp_low |
2130 ((unsigned long long)aenq_common->timestamp_high << 32);
2131 ENA_TOUCH(timestamp); /* In case debug is disabled */
2132 ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
2133 aenq_common->group,
2134 aenq_common->syndrom,
2135 timestamp);
2136
2137 /* Handle specific event*/
2138 handler_cb = ena_com_get_specific_aenq_cb(dev,
2139 aenq_common->group);
2140 handler_cb(data, aenq_e); /* call the actual event handler*/
2141
2142 /* Get next event entry */
2143 masked_head++;
2144 processed++;
2145
2146 if (unlikely(masked_head == aenq->q_depth)) {
2147 masked_head = 0;
2148 phase = !phase;
2149 }
2150 aenq_e = &aenq->entries[masked_head];
2151 aenq_common = &aenq_e->aenq_common_desc;
2152 }
2153
2154 aenq->head += processed;
2155 aenq->phase = phase;
2156
2157 /* Don't update aenq doorbell if there weren't any processed events */
2158 if (!processed)
2159 return;
2160
2161 /* write the aenq doorbell after all AENQ descriptors were read */
2162 mb();
2163 ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
2164 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2165 mmiowb();
2166}
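/* AENQ ownership is tracked with a phase bit: the device writes every entry
 * with the current generation's phase, and the loop above consumes entries
 * only while the descriptor phase matches aenq->phase. Each time masked_head
 * wraps past q_depth the expected phase flips, so stale entries from the
 * previous generation are never handled twice. For example, with q_depth = 16
 * and phase = 1, entry 15 is consumed with phase 1 and entry 0 of the next
 * pass is expected to carry phase 0.
 */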
2167
2168int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2169 enum ena_regs_reset_reason_types reset_reason)
2170{
2171 u32 stat, timeout, cap, reset_val;
2172 int rc;
2173
2174 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2175 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2176
2177 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2178 (cap == ENA_MMIO_READ_TIMEOUT))) {
2179 ena_trc_err("Reg read32 timeout occurred\n");
2180 return ENA_COM_TIMER_EXPIRED;
2181 }
2182
2183 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2184 ena_trc_err("Device isn't ready, can't reset device\n");
2185 return ENA_COM_INVAL;
2186 }
2187
2188 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2189 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2190 if (timeout == 0) {
2191 ena_trc_err("Invalid timeout value\n");
2192 return ENA_COM_INVAL;
2193 }
2194
2195 /* start reset */
2196 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2197 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2198 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2199 ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2200
2201 /* Write again the MMIO read request address */
2202 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2203
2204 rc = wait_for_reset_state(ena_dev, timeout,
2205 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2206 if (rc != 0) {
2207 ena_trc_err("Reset indication didn't turn on\n");
2208 return rc;
2209 }
2210
2211 /* reset done */
2212 ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2213 rc = wait_for_reset_state(ena_dev, timeout, 0);
2214 if (rc != 0) {
2215 ena_trc_err("Reset indication didn't turn off\n");
2216 return rc;
2217 }
2218
2219 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2220 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2221 if (timeout)
2222 /* the resolution of timeout reg is 100ms */
2223 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2224 else
2225 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2226
2227 return 0;
2228}
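/* The admin completion timeout chosen above comes from the CAPS register:
 * the ENA_REGS_CAPS_ADMIN_CMD_TO field is given in 100 ms units, so e.g. a
 * raw value of 5 yields 5 * 100000 us = 500 ms, while a zero field falls
 * back to the ADMIN_CMD_TIMEOUT_US default.
 */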
2229
2230static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2231 struct ena_com_stats_ctx *ctx,
2232 enum ena_admin_get_stats_type type)
2233{
2234 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2235 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2236 struct ena_com_admin_queue *admin_queue;
2237 int ret;
2238
2239 admin_queue = &ena_dev->admin_queue;
2240
2241 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2242 get_cmd->aq_common_descriptor.flags = 0;
2243 get_cmd->type = type;
2244
2245 ret = ena_com_execute_admin_command(admin_queue,
2246 (struct ena_admin_aq_entry *)get_cmd,
2247 sizeof(*get_cmd),
2248 (struct ena_admin_acq_entry *)get_resp,
2249 sizeof(*get_resp));
2250
2251 if (unlikely(ret))
2252 ena_trc_err("Failed to get stats. error: %d\n", ret);
2253
2254 return ret;
2255}
2256
2257int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2258 struct ena_admin_basic_stats *stats)
2259{
2260 struct ena_com_stats_ctx ctx;
2261 int ret;
2262
2263 memset(&ctx, 0x0, sizeof(ctx));
2264 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2265 if (likely(ret == 0))
2266 memcpy(stats, &ctx.get_resp.basic_stats,
2267 sizeof(ctx.get_resp.basic_stats));
2268
2269 return ret;
2270}
2271
2272int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2273{
2274 struct ena_com_admin_queue *admin_queue;
2275 struct ena_admin_set_feat_cmd cmd;
2276 struct ena_admin_set_feat_resp resp;
2277 int ret;
2278
2279 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2280 ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2281 return ENA_COM_UNSUPPORTED;
2282 }
2283
2284 memset(&cmd, 0x0, sizeof(cmd));
2285 admin_queue = &ena_dev->admin_queue;
2286
2287 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2288 cmd.aq_common_descriptor.flags = 0;
2289 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2290 cmd.u.mtu.mtu = mtu;
2291
2292 ret = ena_com_execute_admin_command(admin_queue,
2293 (struct ena_admin_aq_entry *)&cmd,
2294 sizeof(cmd),
2295 (struct ena_admin_acq_entry *)&resp,
2296 sizeof(resp));
2297
2298 if (unlikely(ret))
2299 ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2300
2301 return ret;
2302}
2303
2304int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2305 struct ena_admin_feature_offload_desc *offload)
2306{
2307 int ret;
2308 struct ena_admin_get_feat_resp resp;
2309
2310 ret = ena_com_get_feature(ena_dev, &resp,
2311 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2312 if (unlikely(ret)) {
2313 ena_trc_err("Failed to get offload capabilities %d\n", ret);
2314 return ret;
2315 }
2316
2317 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2318
2319 return 0;
2320}
2321
2322int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2323{
2324 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2325 struct ena_rss *rss = &ena_dev->rss;
2326 struct ena_admin_set_feat_cmd cmd;
2327 struct ena_admin_set_feat_resp resp;
2328 struct ena_admin_get_feat_resp get_resp;
2329 int ret;
2330
2331 if (!ena_com_check_supported_feature_id(ena_dev,
2332 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2333 ena_trc_dbg("Feature %d isn't supported\n",
2334 ENA_ADMIN_RSS_HASH_FUNCTION);
2335 return ENA_COM_UNSUPPORTED;
2336 }
2337
2338 /* Validate hash function is supported */
2339 ret = ena_com_get_feature(ena_dev, &get_resp,
2340 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2341 if (unlikely(ret))
2342 return ret;
2343
2344 if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2345 ena_trc_err("Func hash %d isn't supported by device, abort\n",
2346 rss->hash_func);
2347 return ENA_COM_UNSUPPORTED;
2348 }
2349
2350 memset(&cmd, 0x0, sizeof(cmd));
2351
2352 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2353 cmd.aq_common_descriptor.flags =
2354 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2355 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2356 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2357 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2358
2359 ret = ena_com_mem_addr_set(ena_dev,
2360 &cmd.control_buffer.address,
2361 rss->hash_key_dma_addr);
2362 if (unlikely(ret)) {
2363 ena_trc_err("memory address set failed\n");
2364 return ret;
2365 }
2366
2367 cmd.control_buffer.length = sizeof(*rss->hash_key);
2368
2369 ret = ena_com_execute_admin_command(admin_queue,
2370 (struct ena_admin_aq_entry *)&cmd,
2371 sizeof(cmd),
2372 (struct ena_admin_acq_entry *)&resp,
2373 sizeof(resp));
2374 if (unlikely(ret)) {
2375 ena_trc_err("Failed to set hash function %d. error: %d\n",
2376 rss->hash_func, ret);
2377 return ENA_COM_INVAL;
2378 }
2379
2380 return 0;
2381}
2382
2383int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2384 enum ena_admin_hash_functions func,
2385 const u8 *key, u16 key_len, u32 init_val)
2386{
2387 struct ena_rss *rss = &ena_dev->rss;
2388 struct ena_admin_get_feat_resp get_resp;
2389 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2390 rss->hash_key;
2391 int rc;
2392
2393 /* Make sure size is a mult of DWs */
2394 if (unlikely(key_len & 0x3))
2395 return ENA_COM_INVAL;
2396
2397 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2398 ENA_ADMIN_RSS_HASH_FUNCTION,
2399 rss->hash_key_dma_addr,
2400 sizeof(*rss->hash_key), 0);
2401 if (unlikely(rc))
2402 return rc;
2403
2404 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2405 ena_trc_err("Flow hash function %d isn't supported\n", func);
2406 return ENA_COM_UNSUPPORTED;
2407 }
2408
2409 switch (func) {
2410 case ENA_ADMIN_TOEPLITZ:
2411 if (key_len > sizeof(hash_key->key)) {
2412 ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
2413 key_len, sizeof(hash_key->key));
2414 return ENA_COM_INVAL;
2415 }
2416
2417 memcpy(hash_key->key, key, key_len);
2418 rss->hash_init_val = init_val;
2419 hash_key->keys_num = key_len >> 2;
2420 break;
2421 case ENA_ADMIN_CRC32:
2422 rss->hash_init_val = init_val;
2423 break;
2424 default:
2425 ena_trc_err("Invalid hash function (%d)\n", func);
2426 return ENA_COM_INVAL;
2427 }
2428
2429 rc = ena_com_set_hash_function(ena_dev);
2430
2431 /* Restore the old function */
2432 if (unlikely(rc))
2433 ena_com_get_hash_function(ena_dev, NULL, NULL);
2434
2435 return rc;
2436}
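/* Usage sketch for a Toeplitz key (illustrative only, key bytes arbitrary).
 * key_len must be a multiple of 4 since the key is stored as DWORDs, and it
 * may not exceed the size of the device hash key:
 *
 *	static const u8 rss_key[40] = { 0x6d, 0x5a, ... };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					rss_key, sizeof(rss_key),
 *					0xffffffff);
 */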
2437
2438int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2439 enum ena_admin_hash_functions *func,
2440 u8 *key)
2441{
2442 struct ena_rss *rss = &ena_dev->rss;
2443 struct ena_admin_get_feat_resp get_resp;
2444 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2445 rss->hash_key;
2446 int rc;
2447
2448 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2449 ENA_ADMIN_RSS_HASH_FUNCTION,
2450 rss->hash_key_dma_addr,
2451 sizeof(*rss->hash_key), 0);
2452 if (unlikely(rc))
2453 return rc;
2454
2455 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2456 if (func)
2457 *func = rss->hash_func;
2458
2459 if (key)
2460 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2461
2462 return 0;
2463}
2464
2465int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2466 enum ena_admin_flow_hash_proto proto,
2467 u16 *fields)
2468{
2469 struct ena_rss *rss = &ena_dev->rss;
2470 struct ena_admin_get_feat_resp get_resp;
2471 int rc;
2472
2473 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2474 ENA_ADMIN_RSS_HASH_INPUT,
2475 rss->hash_ctrl_dma_addr,
2476 sizeof(*rss->hash_ctrl), 0);
2477 if (unlikely(rc))
2478 return rc;
2479
2480 if (fields)
2481 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2482
2483 return 0;
2484}
2485
2486int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2487{
2488 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2489 struct ena_rss *rss = &ena_dev->rss;
2490 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2491 struct ena_admin_set_feat_cmd cmd;
2492 struct ena_admin_set_feat_resp resp;
2493 int ret;
2494
2495 if (!ena_com_check_supported_feature_id(ena_dev,
2496 ENA_ADMIN_RSS_HASH_INPUT)) {
2497 ena_trc_dbg("Feature %d isn't supported\n",
2498 ENA_ADMIN_RSS_HASH_INPUT);
2499 return ENA_COM_UNSUPPORTED;
2500 }
2501
2502 memset(&cmd, 0x0, sizeof(cmd));
2503
2504 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2505 cmd.aq_common_descriptor.flags =
2506 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2507 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2508 cmd.u.flow_hash_input.enabled_input_sort =
2509 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2510 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2511
2512 ret = ena_com_mem_addr_set(ena_dev,
2513 &cmd.control_buffer.address,
2514 rss->hash_ctrl_dma_addr);
2515 if (unlikely(ret)) {
2516 ena_trc_err("memory address set failed\n");
2517 return ret;
2518 }
2519 cmd.control_buffer.length = sizeof(*hash_ctrl);
2520
2521 ret = ena_com_execute_admin_command(admin_queue,
2522 (struct ena_admin_aq_entry *)&cmd,
2523 sizeof(cmd),
2524 (struct ena_admin_acq_entry *)&resp,
2525 sizeof(resp));
2526 if (unlikely(ret))
2527 ena_trc_err("Failed to set hash input. error: %d\n", ret);
2528
2529 return ret;
2530}
2531
2532int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2533{
2534 struct ena_rss *rss = &ena_dev->rss;
2535 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2536 rss->hash_ctrl;
2537 u16 available_fields = 0;
2538 int rc, i;
2539
2540 /* Get the supported hash input */
2541 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2542 if (unlikely(rc))
2543 return rc;
2544
2545 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2546 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2547 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2548
2549 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2550 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2551 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2552
2553 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2554 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2555 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2556
2557 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2558 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2559 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2560
2561 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2562 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2563
2564 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2565 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2566
2567 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2568 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2569
2570 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2571 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2572
2573 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2574 available_fields = hash_ctrl->selected_fields[i].fields &
2575 hash_ctrl->supported_fields[i].fields;
2576 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2577 ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2578 i, hash_ctrl->supported_fields[i].fields,
2579 hash_ctrl->selected_fields[i].fields);
2580 return ENA_COM_UNSUPPORTED;
2581 }
2582 }
2583
2584 rc = ena_com_set_hash_ctrl(ena_dev);
2585
2586 /* In case of failure, restore the old hash ctrl */
2587 if (unlikely(rc))
2588 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2589
2590 return rc;
2591}
2592
2593int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2594 enum ena_admin_flow_hash_proto proto,
2595 u16 hash_fields)
2596{
2597 struct ena_rss *rss = &ena_dev->rss;
2598 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2599 u16 supported_fields;
2600 int rc;
2601
2602 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2603 ena_trc_err("Invalid proto num (%u)\n", proto);
2604 return ENA_COM_INVAL;
2605 }
2606
2607 /* Get the ctrl table */
2608 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2609 if (unlikely(rc))
2610 return rc;
2611
2612 /* Make sure all the fields are supported */
2613 supported_fields = hash_ctrl->supported_fields[proto].fields;
2614 if ((hash_fields & supported_fields) != hash_fields) {
2615 ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2616 proto, hash_fields, supported_fields);
2617 }
2618
2619 hash_ctrl->selected_fields[proto].fields = hash_fields;
2620
2621 rc = ena_com_set_hash_ctrl(ena_dev);
2622
2623 /* In case of failure, restore the old hash ctrl */
2624 if (unlikely(rc))
2625 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2626
2627 return 0;
2628}
2629
2630int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2631 u16 entry_idx, u16 entry_value)
2632{
2633 struct ena_rss *rss = &ena_dev->rss;
2634
2635 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2636 return ENA_COM_INVAL;
2637
2638 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2639 return ENA_COM_INVAL;
2640
2641 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2642
2643 return 0;
2644}
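/* Sketch of populating the RSS indirection table round-robin over the RX
 * queues (illustrative only; how an entry value maps to a device queue id
 * is up to the caller, see ena_com_ind_tbl_convert_to_device()):
 *
 *	for (i = 0; i < (1U << tbl_log_size); i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i, i % nb_rx_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 */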
2645
2646int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2647{
2648 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2649 struct ena_rss *rss = &ena_dev->rss;
2650 struct ena_admin_set_feat_cmd cmd;
2651 struct ena_admin_set_feat_resp resp;
2652 int ret;
2653
2654 if (!ena_com_check_supported_feature_id(ena_dev,
2655 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2656 ena_trc_dbg("Feature %d isn't supported\n",
2657 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2658 return ENA_COM_UNSUPPORTED;
2659 }
2660
2661 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2662 if (ret) {
2663 ena_trc_err("Failed to convert host indirection table to device table\n");
2664 return ret;
2665 }
2666
2667 memset(&cmd, 0x0, sizeof(cmd));
2668
2669 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2670 cmd.aq_common_descriptor.flags =
2671 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2672 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2673 cmd.u.ind_table.size = rss->tbl_log_size;
2674 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2675
2676 ret = ena_com_mem_addr_set(ena_dev,
2677 &cmd.control_buffer.address,
2678 rss->rss_ind_tbl_dma_addr);
2679 if (unlikely(ret)) {
2680 ena_trc_err("memory address set failed\n");
2681 return ret;
2682 }
2683
2684 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2685 sizeof(struct ena_admin_rss_ind_table_entry);
2686
2687 ret = ena_com_execute_admin_command(admin_queue,
2688 (struct ena_admin_aq_entry *)&cmd,
2689 sizeof(cmd),
2690 (struct ena_admin_acq_entry *)&resp,
2691 sizeof(resp));
2692
2693 if (unlikely(ret))
2694 ena_trc_err("Failed to set indirect table. error: %d\n", ret);
2695
2696 return ret;
2697}
2698
2699int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2700{
2701 struct ena_rss *rss = &ena_dev->rss;
2702 struct ena_admin_get_feat_resp get_resp;
2703 u32 tbl_size;
2704 int i, rc;
2705
2706 tbl_size = (1ULL << rss->tbl_log_size) *
2707 sizeof(struct ena_admin_rss_ind_table_entry);
2708
2709 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2710 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2711 rss->rss_ind_tbl_dma_addr,
2712 tbl_size, 0);
2713 if (unlikely(rc))
2714 return rc;
2715
2716 if (!ind_tbl)
2717 return 0;
2718
2719 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2720 if (unlikely(rc))
2721 return rc;
2722
2723 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2724 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2725
2726 return 0;
2727}
2728
2729int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2730{
2731 int rc;
2732
2733 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2734
2735 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2736 if (unlikely(rc))
2737 goto err_indr_tbl;
2738
2739 rc = ena_com_hash_key_allocate(ena_dev);
2740 if (unlikely(rc))
2741 goto err_hash_key;
2742
2743 rc = ena_com_hash_ctrl_init(ena_dev);
2744 if (unlikely(rc))
2745 goto err_hash_ctrl;
2746
2747 return 0;
2748
2749err_hash_ctrl:
2750 ena_com_hash_key_destroy(ena_dev);
2751err_hash_key:
2752 ena_com_indirect_table_destroy(ena_dev);
2753err_indr_tbl:
2754
2755 return rc;
2756}
2757
2758void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2759{
2760 ena_com_indirect_table_destroy(ena_dev);
2761 ena_com_hash_key_destroy(ena_dev);
2762 ena_com_hash_ctrl_destroy(ena_dev);
2763
2764 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2765}
2766
2767int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2768{
2769 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2770
2771 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2772 SZ_4K,
2773 host_attr->host_info,
2774 host_attr->host_info_dma_addr,
2775 host_attr->host_info_dma_handle);
2776 if (unlikely(!host_attr->host_info))
2777 return ENA_COM_NO_MEM;
2778
2779 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2780 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2781 (ENA_COMMON_SPEC_VERSION_MINOR));
2782
2783 return 0;
2784}
2785
2786int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2787 u32 debug_area_size)
2788{
2789 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2790
2791 ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
2792 debug_area_size,
2793 host_attr->debug_area_virt_addr,
2794 host_attr->debug_area_dma_addr,
2795 host_attr->debug_area_dma_handle);
2796 if (unlikely(!host_attr->debug_area_virt_addr)) {
2797 host_attr->debug_area_size = 0;
2798 return ENA_COM_NO_MEM;
2799 }
2800
2801 host_attr->debug_area_size = debug_area_size;
2802
2803 return 0;
2804}
2805
2806void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2807{
2808 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2809
2810 if (host_attr->host_info) {
2811 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2812 SZ_4K,
2813 host_attr->host_info,
2814 host_attr->host_info_dma_addr,
2815 host_attr->host_info_dma_handle);
2816 host_attr->host_info = NULL;
2817 }
2818}
2819
2820void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2821{
2822 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2823
2824 if (host_attr->debug_area_virt_addr) {
2825 ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
2826 host_attr->debug_area_size,
2827 host_attr->debug_area_virt_addr,
2828 host_attr->debug_area_dma_addr,
2829 host_attr->debug_area_dma_handle);
2830 host_attr->debug_area_virt_addr = NULL;
2831 }
2832}
2833
2834int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2835{
2836 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2837 struct ena_com_admin_queue *admin_queue;
2838 struct ena_admin_set_feat_cmd cmd;
2839 struct ena_admin_set_feat_resp resp;
2840
2841 int ret;
2842
2843 /* Host attribute config is called before ena_com_get_dev_attr_feat
2844 * so ena_com can't check if the feature is supported.
2845 */
2846
2847 memset(&cmd, 0x0, sizeof(cmd));
2848 admin_queue = &ena_dev->admin_queue;
2849
2850 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2851 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2852
2853 ret = ena_com_mem_addr_set(ena_dev,
2854 &cmd.u.host_attr.debug_ba,
2855 host_attr->debug_area_dma_addr);
2856 if (unlikely(ret)) {
2857 ena_trc_err("memory address set failed\n");
2858 return ret;
2859 }
2860
2861 ret = ena_com_mem_addr_set(ena_dev,
2862 &cmd.u.host_attr.os_info_ba,
2863 host_attr->host_info_dma_addr);
2864 if (unlikely(ret)) {
2865 ena_trc_err("memory address set failed\n");
2866 return ret;
2867 }
2868
2869 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2870
2871 ret = ena_com_execute_admin_command(admin_queue,
2872 (struct ena_admin_aq_entry *)&cmd,
2873 sizeof(cmd),
2874 (struct ena_admin_acq_entry *)&resp,
2875 sizeof(resp));
2876
2877 if (unlikely(ret))
2878 ena_trc_err("Failed to set host attributes: %d\n", ret);
2879
2880 return ret;
2881}
2882
2883/* Interrupt moderation */
2884bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2885{
2886 return ena_com_check_supported_feature_id(ena_dev,
2887 ENA_ADMIN_INTERRUPT_MODERATION);
2888}
2889
2890int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2891 u32 tx_coalesce_usecs)
2892{
2893 if (!ena_dev->intr_delay_resolution) {
2894 ena_trc_err("Illegal interrupt delay granularity value\n");
2895 return ENA_COM_FAULT;
2896 }
2897
2898 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2899 ena_dev->intr_delay_resolution;
2900
2901 return 0;
2902}
2903
2904int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2905 u32 rx_coalesce_usecs)
2906{
2907 if (!ena_dev->intr_delay_resolution) {
2908 ena_trc_err("Illegal interrupt delay granularity value\n");
2909 return ENA_COM_FAULT;
2910 }
2911
2912 /* We use LOWEST entry of moderation table for storing
2913 * nonadaptive interrupt coalescing values
2914 */
2915 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2916 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2917
2918 return 0;
2919}
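/* Both helpers above store intervals in units of the device's interrupt
 * delay resolution (obtained from the ENA_ADMIN_INTERRUPT_MODERATION feature
 * at init time). For example, with intr_delay_resolution = 2 us, a request
 * for 64 us of RX coalescing is stored as 32 units in the LOWEST table entry.
 */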
2920
2921void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2922{
2923 if (ena_dev->intr_moder_tbl)
2924 ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2925 ena_dev->intr_moder_tbl = NULL;
2926}
2927
2928int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2929{
2930 struct ena_admin_get_feat_resp get_resp;
2931 u16 delay_resolution;
2932 int rc;
2933
2934 rc = ena_com_get_feature(ena_dev, &get_resp,
2935 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2936
2937 if (rc) {
2938 if (rc == ENA_COM_UNSUPPORTED) {
2939 ena_trc_dbg("Feature %d isn't supported\n",
2940 ENA_ADMIN_INTERRUPT_MODERATION);
2941 rc = 0;
2942 } else {
2943 ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2944 rc);
2945 }
2946
2947 /* no moderation supported, disable adaptive support */
2948 ena_com_disable_adaptive_moderation(ena_dev);
2949 return rc;
2950 }
2951
2952 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2953 if (rc)
2954 goto err;
2955
2956 /* if moderation is supported by device we set adaptive moderation */
2957 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2958 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2959 ena_com_enable_adaptive_moderation(ena_dev);
2960
2961 return 0;
2962err:
2963 ena_com_destroy_interrupt_moderation(ena_dev);
2964 return rc;
2965}
2966
2967 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2968{
2969 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2970
2971 if (!intr_moder_tbl)
2972 return;
2973
2974 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2975 ENA_INTR_LOWEST_USECS;
2976 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2977 ENA_INTR_LOWEST_PKTS;
2978 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2979 ENA_INTR_LOWEST_BYTES;
2980
2981 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2982 ENA_INTR_LOW_USECS;
2983 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2984 ENA_INTR_LOW_PKTS;
2985 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2986 ENA_INTR_LOW_BYTES;
2987
2988 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2989 ENA_INTR_MID_USECS;
2990 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2991 ENA_INTR_MID_PKTS;
2992 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2993 ENA_INTR_MID_BYTES;
2994
2995 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2996 ENA_INTR_HIGH_USECS;
2997 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2998 ENA_INTR_HIGH_PKTS;
2999 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
3000 ENA_INTR_HIGH_BYTES;
3001
3002 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
3003 ENA_INTR_HIGHEST_USECS;
3004 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
3005 ENA_INTR_HIGHEST_PKTS;
3006 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
3007 ENA_INTR_HIGHEST_BYTES;
3008}
3009
3010 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
3011{
3012 return ena_dev->intr_moder_tx_interval;
3013}
3014
3015 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
3016{
3017 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
3018
3019 if (intr_moder_tbl)
3020 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
3021
3022 return 0;
3023}
3024
3025void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
3026 enum ena_intr_moder_level level,
3027 struct ena_intr_moder_entry *entry)
3028{
3029 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
3030
3031 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
3032 return;
3033
3034 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
3035 if (ena_dev->intr_delay_resolution)
3036 intr_moder_tbl[level].intr_moder_interval /=
3037 ena_dev->intr_delay_resolution;
3038 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
3039
3040 /* use hardcoded value until ethtool supports bytecount parameter */
3041 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
3042 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
3043}
3044
3045void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
3046 enum ena_intr_moder_level level,
3047 struct ena_intr_moder_entry *entry)
3048{
3049 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
3050
3051 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
3052 return;
3053
3054 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
3055 if (ena_dev->intr_delay_resolution)
3056 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
3057 entry->pkts_per_interval =
3058 intr_moder_tbl[level].pkts_per_interval;
3059 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
3060}
3061
3062int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
3063 struct ena_admin_feature_llq_desc *llq_features,
3064 struct ena_llq_configurations *llq_default_cfg)
3065{
3066 int rc;
3067 int size;
3068
3069 if (!llq_features->max_llq_num) {
3070 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3071 return 0;
3072 }
3073
3074 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
3075 if (rc)
3076 return rc;
3077
3078 /* Validate the descriptor is not too big */
3079 size = ena_dev->tx_max_header_size;
3080 size += ena_dev->llq_info.descs_num_before_header *
3081 sizeof(struct ena_eth_io_tx_desc);
3082
3083 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
3084 ena_trc_err("the size of the LLQ entry is smaller than needed\n");
3085 return ENA_COM_INVAL;
3086 }
3087
3088 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
3089
3090 return 0;
3091}
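/* The size check above ensures the pushed part of a TX packet (up to
 * tx_max_header_size header bytes plus descs_num_before_header descriptors)
 * fits into one LLQ entry. Purely as an illustration: with a 128-byte LLQ
 * entry, a 96-byte max header and 16-byte TX descriptors, at most two
 * descriptors can precede the header (96 + 2 * 16 = 128).
 */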