1 /*
2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include "ena_com.h"
34
35 /*****************************************************************************/
36 /*****************************************************************************/
37
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
40
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
43
44
45 #define ENA_CTRL_MAJOR 0
46 #define ENA_CTRL_MINOR 0
47 #define ENA_CTRL_SUB_MINOR 1
48
49 #define MIN_ENA_CTRL_VER \
50 (((ENA_CTRL_MAJOR) << \
51 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
52 ((ENA_CTRL_MINOR) << \
53 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
54 (ENA_CTRL_SUB_MINOR))
55
56 #define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
57 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
58
59 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
60
61 #define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
62
63 #define ENA_REGS_ADMIN_INTR_MASK 1
64
65 #define ENA_POLL_MS 5
66
67 /*****************************************************************************/
68 /*****************************************************************************/
69 /*****************************************************************************/
70
71 enum ena_cmd_status {
72 ENA_CMD_SUBMITTED,
73 ENA_CMD_COMPLETED,
74 /* Abort - canceled by the driver */
75 ENA_CMD_ABORTED,
76 };
77
78 struct ena_comp_ctx {
79 struct completion wait_event;
80 struct ena_admin_acq_entry *user_cqe;
81 u32 comp_size;
82 enum ena_cmd_status status;
83 /* status from the device */
84 u8 comp_status;
85 u8 cmd_opcode;
86 bool occupied;
87 };
88
89 struct ena_com_stats_ctx {
90 struct ena_admin_aq_get_stats_cmd get_cmd;
91 struct ena_admin_acq_get_stats_resp get_resp;
92 };
93
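/* Split a dma_addr_t into the low/high halves of an ena_common_mem_addr,
 * after verifying the address fits within the DMA width the device reports.
 */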
94 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
95 struct ena_common_mem_addr *ena_addr,
96 dma_addr_t addr)
97 {
98 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
99 pr_err("dma address has more bits that the device supports\n");
100 return -EINVAL;
101 }
102
103 ena_addr->mem_addr_low = lower_32_bits(addr);
104 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
105
106 return 0;
107 }
108
109 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
110 {
111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113
114 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL);
116
117 if (!sq->entries) {
118 pr_err("memory allocation failed");
119 return -ENOMEM;
120 }
121
122 sq->head = 0;
123 sq->tail = 0;
124 sq->phase = 1;
125
126 sq->db_addr = NULL;
127
128 return 0;
129 }
130
131 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
132 {
133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135
136 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL);
138
139 if (!cq->entries) {
140 pr_err("memory allocation failed");
141 return -ENOMEM;
142 }
143
144 cq->head = 0;
145 cq->phase = 1;
146
147 return 0;
148 }
149
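/* Allocate the AENQ ring, program its base address and capabilities into the
 * device registers, and attach the caller-supplied event handlers.
 */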
150 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
151 struct ena_aenq_handlers *aenq_handlers)
152 {
153 struct ena_com_aenq *aenq = &dev->aenq;
154 u32 addr_low, addr_high, aenq_caps;
155 u16 size;
156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL);
161
162 if (!aenq->entries) {
163 pr_err("memory allocation failed");
164 return -ENOMEM;
165 }
166
167 aenq->head = aenq->q_depth;
168 aenq->phase = 1;
169
170 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
171 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
172
173 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
174 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
175
176 aenq_caps = 0;
177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
178 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
179 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
180 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
181 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
182
183 if (unlikely(!aenq_handlers)) {
184 pr_err("aenq handlers pointer is NULL\n");
185 return -EINVAL;
186 }
187
188 aenq->aenq_handlers = aenq_handlers;
189
190 return 0;
191 }
192
193 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
194 struct ena_comp_ctx *comp_ctx)
195 {
196 comp_ctx->occupied = false;
197 atomic_dec(&queue->outstanding_cmds);
198 }
199
200 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
201 u16 command_id, bool capture)
202 {
203 if (unlikely(command_id >= queue->q_depth)) {
204 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
205 command_id, queue->q_depth);
206 return NULL;
207 }
208
209 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
210 pr_err("Completion context is occupied\n");
211 return NULL;
212 }
213
214 if (capture) {
215 atomic_inc(&queue->outstanding_cmds);
216 queue->comp_ctx[command_id].occupied = true;
217 }
218
219 return &queue->comp_ctx[command_id];
220 }
221
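/* Build and queue a single admin command: stamp the phase bit and command id,
 * copy the descriptor into the admin SQ, advance the tail and ring the
 * doorbell. Called with the admin queue lock held.
 */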
222 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
223 struct ena_admin_aq_entry *cmd,
224 size_t cmd_size_in_bytes,
225 struct ena_admin_acq_entry *comp,
226 size_t comp_size_in_bytes)
227 {
228 struct ena_comp_ctx *comp_ctx;
229 u16 tail_masked, cmd_id;
230 u16 queue_size_mask;
231 u16 cnt;
232
233 queue_size_mask = admin_queue->q_depth - 1;
234
235 tail_masked = admin_queue->sq.tail & queue_size_mask;
236
237 /* In case of queue FULL */
238 cnt = atomic_read(&admin_queue->outstanding_cmds);
239 if (cnt >= admin_queue->q_depth) {
240 pr_debug("admin queue is full.\n");
241 admin_queue->stats.out_of_space++;
242 return ERR_PTR(-ENOSPC);
243 }
244
245 cmd_id = admin_queue->curr_cmd_id;
246
247 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
248 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
249
250 cmd->aq_common_descriptor.command_id |= cmd_id &
251 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
252
253 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
254 if (unlikely(!comp_ctx))
255 return ERR_PTR(-EINVAL);
256
257 comp_ctx->status = ENA_CMD_SUBMITTED;
258 comp_ctx->comp_size = (u32)comp_size_in_bytes;
259 comp_ctx->user_cqe = comp;
260 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
261
262 reinit_completion(&comp_ctx->wait_event);
263
264 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
265
266 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
267 queue_size_mask;
268
269 admin_queue->sq.tail++;
270 admin_queue->stats.submitted_cmd++;
271
272 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
273 admin_queue->sq.phase = !admin_queue->sq.phase;
274
275 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
276
277 return comp_ctx;
278 }
279
280 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
281 {
282 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
283 struct ena_comp_ctx *comp_ctx;
284 u16 i;
285
286 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
287 if (unlikely(!queue->comp_ctx)) {
288 pr_err("memory allocation failed");
289 return -ENOMEM;
290 }
291
292 for (i = 0; i < queue->q_depth; i++) {
293 comp_ctx = get_comp_ctxt(queue, i, false);
294 if (comp_ctx)
295 init_completion(&comp_ctx->wait_event);
296 }
297
298 return 0;
299 }
300
301 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
302 struct ena_admin_aq_entry *cmd,
303 size_t cmd_size_in_bytes,
304 struct ena_admin_acq_entry *comp,
305 size_t comp_size_in_bytes)
306 {
307 unsigned long flags;
308 struct ena_comp_ctx *comp_ctx;
309
310 spin_lock_irqsave(&admin_queue->q_lock, flags);
311 if (unlikely(!admin_queue->running_state)) {
312 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
313 return ERR_PTR(-ENODEV);
314 }
315 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
316 cmd_size_in_bytes,
317 comp,
318 comp_size_in_bytes);
319 if (IS_ERR(comp_ctx))
320 admin_queue->running_state = false;
321 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
322
323 return comp_ctx;
324 }
325
326 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
327 struct ena_com_create_io_ctx *ctx,
328 struct ena_com_io_sq *io_sq)
329 {
330 size_t size;
331 int dev_node = 0;
332
333 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
334
335 io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) :
339 sizeof(struct ena_eth_io_rx_desc);
340
341 size = io_sq->desc_entry_size * io_sq->q_depth;
342
343 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr =
347 dma_zalloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr =
353 dma_zalloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL);
356 }
357
358 if (!io_sq->desc_addr.virt_addr) {
359 pr_err("memory allocation failed");
360 return -ENOMEM;
361 }
362 }
363
364 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
365 /* Allocate bounce buffers */
366 io_sq->bounce_buf_ctrl.buffer_size =
367 ena_dev->llq_info.desc_list_entry_size;
368 io_sq->bounce_buf_ctrl.buffers_num =
369 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
370 io_sq->bounce_buf_ctrl.next_to_use = 0;
371
372 size = io_sq->bounce_buf_ctrl.buffer_size *
373 io_sq->bounce_buf_ctrl.buffers_num;
374
375 dev_node = dev_to_node(ena_dev->dmadev);
376 set_dev_node(ena_dev->dmadev, ctx->numa_node);
377 io_sq->bounce_buf_ctrl.base_buffer =
378 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
379 set_dev_node(ena_dev->dmadev, dev_node);
380 if (!io_sq->bounce_buf_ctrl.base_buffer)
381 io_sq->bounce_buf_ctrl.base_buffer =
382 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
383
384 if (!io_sq->bounce_buf_ctrl.base_buffer) {
385 pr_err("bounce buffer memory allocation failed");
386 return -ENOMEM;
387 }
388
389 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
390 sizeof(io_sq->llq_info));
391
392 /* Initialize the first bounce buffer */
393 io_sq->llq_buf_ctrl.curr_bounce_buf =
394 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
395 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
396 0x0, io_sq->llq_info.desc_list_entry_size);
397 io_sq->llq_buf_ctrl.descs_left_in_line =
398 io_sq->llq_info.descs_num_before_header;
399 }
400
401 io_sq->tail = 0;
402 io_sq->next_to_comp = 0;
403 io_sq->phase = 1;
404
405 return 0;
406 }
407
408 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
409 struct ena_com_create_io_ctx *ctx,
410 struct ena_com_io_cq *io_cq)
411 {
412 size_t size;
413 int prev_node = 0;
414
415 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
416
417 /* Use the basic completion descriptor for Rx */
418 io_cq->cdesc_entry_size_in_bytes =
419 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
420 sizeof(struct ena_eth_io_tx_cdesc) :
421 sizeof(struct ena_eth_io_rx_cdesc_base);
422
423 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
424
425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr =
428 dma_zalloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr =
433 dma_zalloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr,
435 GFP_KERNEL);
436 }
437
438 if (!io_cq->cdesc_addr.virt_addr) {
439 pr_err("memory allocation failed");
440 return -ENOMEM;
441 }
442
443 io_cq->phase = 1;
444 io_cq->head = 0;
445
446 return 0;
447 }
448
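/* Match one completion entry to its context by command id, record the device
 * status, copy the entry to the caller's buffer if one was provided and wake
 * the waiter when not in polling mode.
 */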
449 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
450 struct ena_admin_acq_entry *cqe)
451 {
452 struct ena_comp_ctx *comp_ctx;
453 u16 cmd_id;
454
455 cmd_id = cqe->acq_common_descriptor.command &
456 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
457
458 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
459 if (unlikely(!comp_ctx)) {
460 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
461 admin_queue->running_state = false;
462 return;
463 }
464
465 comp_ctx->status = ENA_CMD_COMPLETED;
466 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
467
468 if (comp_ctx->user_cqe)
469 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
470
471 if (!admin_queue->polling)
472 complete(&comp_ctx->wait_event);
473 }
474
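/* Walk the admin CQ, using the phase bit to detect new entries, and hand each
 * completion to ena_com_handle_single_admin_completion().
 */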
475 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
476 {
477 struct ena_admin_acq_entry *cqe = NULL;
478 u16 comp_num = 0;
479 u16 head_masked;
480 u8 phase;
481
482 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
483 phase = admin_queue->cq.phase;
484
485 cqe = &admin_queue->cq.entries[head_masked];
486
487 /* Go over all the completions */
488 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
489 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
490 /* Do not read the rest of the completion entry before the
491 * phase bit has been validated
492 */
493 dma_rmb();
494 ena_com_handle_single_admin_completion(admin_queue, cqe);
495
496 head_masked++;
497 comp_num++;
498 if (unlikely(head_masked == admin_queue->q_depth)) {
499 head_masked = 0;
500 phase = !phase;
501 }
502
503 cqe = &admin_queue->cq.entries[head_masked];
504 }
505
506 admin_queue->cq.head += comp_num;
507 admin_queue->cq.phase = phase;
508 admin_queue->sq.head += comp_num;
509 admin_queue->stats.completed_cmd += comp_num;
510 }
511
512 static int ena_com_comp_status_to_errno(u8 comp_status)
513 {
514 if (unlikely(comp_status != 0))
515 pr_err("admin command failed[%u]\n", comp_status);
516
517 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
518 return -EINVAL;
519
520 switch (comp_status) {
521 case ENA_ADMIN_SUCCESS:
522 return 0;
523 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
524 return -ENOMEM;
525 case ENA_ADMIN_UNSUPPORTED_OPCODE:
526 return -EOPNOTSUPP;
527 case ENA_ADMIN_BAD_OPCODE:
528 case ENA_ADMIN_MALFORMED_REQUEST:
529 case ENA_ADMIN_ILLEGAL_PARAMETER:
530 case ENA_ADMIN_UNKNOWN_ERROR:
531 return -EINVAL;
532 }
533
534 return 0;
535 }
536
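/* Poll the admin CQ until the command completes, is aborted or the completion
 * timeout expires, then translate the device status into an errno.
 */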
537 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
538 struct ena_com_admin_queue *admin_queue)
539 {
540 unsigned long flags, timeout;
541 int ret;
542
543 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
544
545 while (1) {
546 spin_lock_irqsave(&admin_queue->q_lock, flags);
547 ena_com_handle_admin_completion(admin_queue);
548 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
549
550 if (comp_ctx->status != ENA_CMD_SUBMITTED)
551 break;
552
553 if (time_is_before_jiffies(timeout)) {
554 pr_err("Wait for completion (polling) timeout\n");
555 /* ENA didn't have any completion */
556 spin_lock_irqsave(&admin_queue->q_lock, flags);
557 admin_queue->stats.no_completion++;
558 admin_queue->running_state = false;
559 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
560
561 ret = -ETIME;
562 goto err;
563 }
564
565 msleep(ENA_POLL_MS);
566 }
567
568 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
569 pr_err("Command was aborted\n");
570 spin_lock_irqsave(&admin_queue->q_lock, flags);
571 admin_queue->stats.aborted_cmd++;
572 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
573 ret = -ENODEV;
574 goto err;
575 }
576
577 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
578 comp_ctx->status);
579
580 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
581 err:
582 comp_ctxt_release(admin_queue, comp_ctx);
583 return ret;
584 }
585
586 /**
587 * Set the LLQ configuration in the device firmware
588 *
589 * The driver provides only the enabled feature values to the device,
590 * which, in turn, checks whether they are supported.
591 */
592 static int ena_com_set_llq(struct ena_com_dev *ena_dev)
593 {
594 struct ena_com_admin_queue *admin_queue;
595 struct ena_admin_set_feat_cmd cmd;
596 struct ena_admin_set_feat_resp resp;
597 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
598 int ret;
599
600 memset(&cmd, 0x0, sizeof(cmd));
601 admin_queue = &ena_dev->admin_queue;
602
603 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
604 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
605
606 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
607 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
608 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
609 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
610
611 ret = ena_com_execute_admin_command(admin_queue,
612 (struct ena_admin_aq_entry *)&cmd,
613 sizeof(cmd),
614 (struct ena_admin_acq_entry *)&resp,
615 sizeof(resp));
616
617 if (unlikely(ret))
618 pr_err("Failed to set LLQ configurations: %d\n", ret);
619
620 return ret;
621 }
622
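/* Negotiate the LLQ settings: prefer the driver defaults, fall back to a
 * device-supported value when a default isn't supported, and finally push the
 * chosen configuration to the device via ena_com_set_llq().
 */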
623 static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
624 struct ena_admin_feature_llq_desc *llq_features,
625 struct ena_llq_configurations *llq_default_cfg)
626 {
627 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
628 u16 supported_feat;
629 int rc;
630
631 memset(llq_info, 0, sizeof(*llq_info));
632
633 supported_feat = llq_features->header_location_ctrl_supported;
634
635 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
636 llq_info->header_location_ctrl =
637 llq_default_cfg->llq_header_location;
638 } else {
639 pr_err("Invalid header location control, supported: 0x%x\n",
640 supported_feat);
641 return -EINVAL;
642 }
643
644 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
645 supported_feat = llq_features->descriptors_stride_ctrl_supported;
646 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
647 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
648 } else {
649 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
650 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
651 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
652 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
653 } else {
654 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
655 supported_feat);
656 return -EINVAL;
657 }
658
659 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
660 llq_default_cfg->llq_stride_ctrl, supported_feat,
661 llq_info->desc_stride_ctrl);
662 }
663 } else {
664 llq_info->desc_stride_ctrl = 0;
665 }
666
667 supported_feat = llq_features->entry_size_ctrl_supported;
668 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
669 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
670 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
671 } else {
672 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
673 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
674 llq_info->desc_list_entry_size = 128;
675 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
676 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
677 llq_info->desc_list_entry_size = 192;
678 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
679 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
680 llq_info->desc_list_entry_size = 256;
681 } else {
682 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
683 supported_feat);
684 return -EINVAL;
685 }
686
687 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
688 llq_default_cfg->llq_ring_entry_size, supported_feat,
689 llq_info->desc_list_entry_size);
690 }
691 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
692 /* The desc list entry size should be a whole multiple of 8.
693 * This requirement comes from __iowrite64_copy()
694 */
695 pr_err("illegal entry size %d\n",
696 llq_info->desc_list_entry_size);
697 return -EINVAL;
698 }
699
700 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
701 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
702 sizeof(struct ena_eth_io_tx_desc);
703 else
704 llq_info->descs_per_entry = 1;
705
706 supported_feat = llq_features->desc_num_before_header_supported;
707 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
708 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
709 } else {
710 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
711 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
712 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
713 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
714 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
715 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
716 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
717 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
718 } else {
719 pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
720 supported_feat);
721 return -EINVAL;
722 }
723
724 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
725 llq_default_cfg->llq_num_decs_before_header,
726 supported_feat, llq_info->descs_num_before_header);
727 }
728
729 rc = ena_com_set_llq(ena_dev);
730 if (rc)
731 pr_err("Cannot set LLQ configuration: %d\n", rc);
732
733 return 0;
734 }
735
736 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
737 struct ena_com_admin_queue *admin_queue)
738 {
739 unsigned long flags;
740 int ret;
741
742 wait_for_completion_timeout(&comp_ctx->wait_event,
743 usecs_to_jiffies(
744 admin_queue->completion_timeout));
745
746 /* In case the command wasn't completed, find out the root cause.
747 * There might be 2 kinds of errors:
748 * 1) No completion (timeout reached)
749 * 2) There is a completion but the driver didn't get any msi-x interrupt.
750 */
751 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
752 spin_lock_irqsave(&admin_queue->q_lock, flags);
753 ena_com_handle_admin_completion(admin_queue);
754 admin_queue->stats.no_completion++;
755 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
756
757 if (comp_ctx->status == ENA_CMD_COMPLETED)
758 pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
759 comp_ctx->cmd_opcode);
760 else
761 pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
762 comp_ctx->cmd_opcode, comp_ctx->status);
763
764 admin_queue->running_state = false;
765 ret = -ETIME;
766 goto err;
767 }
768
769 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
770 err:
771 comp_ctxt_release(admin_queue, comp_ctx);
772 return ret;
773 }
774
775 /* This method reads a hardware device register by posting writes
776 * and waiting for the response.
777 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
778 */
779 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
780 {
781 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
782 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
783 mmio_read->read_resp;
784 u32 mmio_read_reg, ret, i;
785 unsigned long flags;
786 u32 timeout = mmio_read->reg_read_to;
787
788 might_sleep();
789
790 if (timeout == 0)
791 timeout = ENA_REG_READ_TIMEOUT;
792
793 /* If readless is disabled, perform regular read */
794 if (!mmio_read->readless_supported)
795 return readl(ena_dev->reg_bar + offset);
796
797 spin_lock_irqsave(&mmio_read->lock, flags);
798 mmio_read->seq_num++;
799
800 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
801 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
802 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
803 mmio_read_reg |= mmio_read->seq_num &
804 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
805
806 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
807
808 for (i = 0; i < timeout; i++) {
809 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
810 break;
811
812 udelay(1);
813 }
814
815 if (unlikely(i == timeout)) {
816 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
817 mmio_read->seq_num, offset, read_resp->req_id,
818 read_resp->reg_off);
819 ret = ENA_MMIO_READ_TIMEOUT;
820 goto err;
821 }
822
823 if (read_resp->reg_off != offset) {
824 pr_err("Read failure: wrong offset provided");
825 ret = ENA_MMIO_READ_TIMEOUT;
826 } else {
827 ret = read_resp->reg_val;
828 }
829 err:
830 spin_unlock_irqrestore(&mmio_read->lock, flags);
831
832 return ret;
833 }
834
835 /* There are two ways to wait for completion.
836 * Polling mode - poll until the completion is available.
837 * Async mode - wait on a wait queue until the completion is ready
838 * (or the timeout expires).
839 * In async mode the IRQ handler is expected to call
840 * ena_com_handle_admin_completion() to mark the completions.
841 */
842 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
843 struct ena_com_admin_queue *admin_queue)
844 {
845 if (admin_queue->polling)
846 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
847 admin_queue);
848
849 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
850 admin_queue);
851 }
852
853 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
854 struct ena_com_io_sq *io_sq)
855 {
856 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
857 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
858 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
859 u8 direction;
860 int ret;
861
862 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
863
864 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
865 direction = ENA_ADMIN_SQ_DIRECTION_TX;
866 else
867 direction = ENA_ADMIN_SQ_DIRECTION_RX;
868
869 destroy_cmd.sq.sq_identity |= (direction <<
870 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
871 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
872
873 destroy_cmd.sq.sq_idx = io_sq->idx;
874 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
875
876 ret = ena_com_execute_admin_command(admin_queue,
877 (struct ena_admin_aq_entry *)&destroy_cmd,
878 sizeof(destroy_cmd),
879 (struct ena_admin_acq_entry *)&destroy_resp,
880 sizeof(destroy_resp));
881
882 if (unlikely(ret && (ret != -ENODEV)))
883 pr_err("failed to destroy io sq error: %d\n", ret);
884
885 return ret;
886 }
887
888 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
889 struct ena_com_io_sq *io_sq,
890 struct ena_com_io_cq *io_cq)
891 {
892 size_t size;
893
894 if (io_cq->cdesc_addr.virt_addr) {
895 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
896
897 dma_free_coherent(ena_dev->dmadev, size,
898 io_cq->cdesc_addr.virt_addr,
899 io_cq->cdesc_addr.phys_addr);
900
901 io_cq->cdesc_addr.virt_addr = NULL;
902 }
903
904 if (io_sq->desc_addr.virt_addr) {
905 size = io_sq->desc_entry_size * io_sq->q_depth;
906
907 dma_free_coherent(ena_dev->dmadev, size,
908 io_sq->desc_addr.virt_addr,
909 io_sq->desc_addr.phys_addr);
910
911 io_sq->desc_addr.virt_addr = NULL;
912 }
913
914 if (io_sq->bounce_buf_ctrl.base_buffer) {
915 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
916 io_sq->bounce_buf_ctrl.base_buffer = NULL;
917 }
918 }
919
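/* Poll the device status register until the reset-in-progress bit matches
 * exp_state or the (100ms resolution) timeout expires.
 */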
920 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
921 u16 exp_state)
922 {
923 u32 val, i;
924
925 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
926 timeout = (timeout * 100) / ENA_POLL_MS;
927
928 for (i = 0; i < timeout; i++) {
929 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
930
931 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
932 pr_err("Reg read timeout occurred\n");
933 return -ETIME;
934 }
935
936 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
937 exp_state)
938 return 0;
939
940 msleep(ENA_POLL_MS);
941 }
942
943 return -ETIME;
944 }
945
946 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
947 enum ena_admin_aq_feature_id feature_id)
948 {
949 u32 feature_mask = 1 << feature_id;
950
951 /* The device attributes feature is always supported */
952 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
953 !(ena_dev->supported_features & feature_mask))
954 return false;
955
956 return true;
957 }
958
959 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
960 struct ena_admin_get_feat_resp *get_resp,
961 enum ena_admin_aq_feature_id feature_id,
962 dma_addr_t control_buf_dma_addr,
963 u32 control_buff_size)
964 {
965 struct ena_com_admin_queue *admin_queue;
966 struct ena_admin_get_feat_cmd get_cmd;
967 int ret;
968
969 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
970 pr_debug("Feature %d isn't supported\n", feature_id);
971 return -EOPNOTSUPP;
972 }
973
974 memset(&get_cmd, 0x0, sizeof(get_cmd));
975 admin_queue = &ena_dev->admin_queue;
976
977 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
978
979 if (control_buff_size)
980 get_cmd.aq_common_descriptor.flags =
981 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
982 else
983 get_cmd.aq_common_descriptor.flags = 0;
984
985 ret = ena_com_mem_addr_set(ena_dev,
986 &get_cmd.control_buffer.address,
987 control_buf_dma_addr);
988 if (unlikely(ret)) {
989 pr_err("memory address set failed\n");
990 return ret;
991 }
992
993 get_cmd.control_buffer.length = control_buff_size;
994
995 get_cmd.feat_common.feature_id = feature_id;
996
997 ret = ena_com_execute_admin_command(admin_queue,
998 (struct ena_admin_aq_entry *)
999 &get_cmd,
1000 sizeof(get_cmd),
1001 (struct ena_admin_acq_entry *)
1002 get_resp,
1003 sizeof(*get_resp));
1004
1005 if (unlikely(ret))
1006 pr_err("Failed to submit get_feature command %d error: %d\n",
1007 feature_id, ret);
1008
1009 return ret;
1010 }
1011
1012 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1013 struct ena_admin_get_feat_resp *get_resp,
1014 enum ena_admin_aq_feature_id feature_id)
1015 {
1016 return ena_com_get_feature_ex(ena_dev,
1017 get_resp,
1018 feature_id,
1019 0,
1020 0);
1021 }
1022
1023 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1024 {
1025 struct ena_rss *rss = &ena_dev->rss;
1026
1027 rss->hash_key =
1028 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1029 &rss->hash_key_dma_addr, GFP_KERNEL);
1030
1031 if (unlikely(!rss->hash_key))
1032 return -ENOMEM;
1033
1034 return 0;
1035 }
1036
1037 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1038 {
1039 struct ena_rss *rss = &ena_dev->rss;
1040
1041 if (rss->hash_key)
1042 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1043 rss->hash_key, rss->hash_key_dma_addr);
1044 rss->hash_key = NULL;
1045 }
1046
1047 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1048 {
1049 struct ena_rss *rss = &ena_dev->rss;
1050
1051 rss->hash_ctrl =
1052 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1053 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1054
1055 if (unlikely(!rss->hash_ctrl))
1056 return -ENOMEM;
1057
1058 return 0;
1059 }
1060
1061 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1062 {
1063 struct ena_rss *rss = &ena_dev->rss;
1064
1065 if (rss->hash_ctrl)
1066 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1067 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1068 rss->hash_ctrl = NULL;
1069 }
1070
1071 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1072 u16 log_size)
1073 {
1074 struct ena_rss *rss = &ena_dev->rss;
1075 struct ena_admin_get_feat_resp get_resp;
1076 size_t tbl_size;
1077 int ret;
1078
1079 ret = ena_com_get_feature(ena_dev, &get_resp,
1080 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
1081 if (unlikely(ret))
1082 return ret;
1083
1084 if ((get_resp.u.ind_table.min_size > log_size) ||
1085 (get_resp.u.ind_table.max_size < log_size)) {
1086 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1087 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1088 1 << get_resp.u.ind_table.max_size);
1089 return -EINVAL;
1090 }
1091
1092 tbl_size = (1ULL << log_size) *
1093 sizeof(struct ena_admin_rss_ind_table_entry);
1094
1095 rss->rss_ind_tbl =
1096 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
1097 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1098 if (unlikely(!rss->rss_ind_tbl))
1099 goto mem_err1;
1100
1101 tbl_size = (1ULL << log_size) * sizeof(u16);
1102 rss->host_rss_ind_tbl =
1103 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1104 if (unlikely(!rss->host_rss_ind_tbl))
1105 goto mem_err2;
1106
1107 rss->tbl_log_size = log_size;
1108
1109 return 0;
1110
1111 mem_err2:
1112 tbl_size = (1ULL << log_size) *
1113 sizeof(struct ena_admin_rss_ind_table_entry);
1114
1115 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1116 rss->rss_ind_tbl_dma_addr);
1117 rss->rss_ind_tbl = NULL;
1118 mem_err1:
1119 rss->tbl_log_size = 0;
1120 return -ENOMEM;
1121 }
1122
1123 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1124 {
1125 struct ena_rss *rss = &ena_dev->rss;
1126 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1127 sizeof(struct ena_admin_rss_ind_table_entry);
1128
1129 if (rss->rss_ind_tbl)
1130 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1131 rss->rss_ind_tbl_dma_addr);
1132 rss->rss_ind_tbl = NULL;
1133
1134 if (rss->host_rss_ind_tbl)
1135 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1136 rss->host_rss_ind_tbl = NULL;
1137 }
1138
1139 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1140 struct ena_com_io_sq *io_sq, u16 cq_idx)
1141 {
1142 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1143 struct ena_admin_aq_create_sq_cmd create_cmd;
1144 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1145 u8 direction;
1146 int ret;
1147
1148 memset(&create_cmd, 0x0, sizeof(create_cmd));
1149
1150 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1151
1152 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1153 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1154 else
1155 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1156
1157 create_cmd.sq_identity |= (direction <<
1158 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1159 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1160
1161 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1162 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1163
1164 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1165 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1166 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1167
1168 create_cmd.sq_caps_3 |=
1169 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1170
1171 create_cmd.cq_idx = cq_idx;
1172 create_cmd.sq_depth = io_sq->q_depth;
1173
1174 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1175 ret = ena_com_mem_addr_set(ena_dev,
1176 &create_cmd.sq_ba,
1177 io_sq->desc_addr.phys_addr);
1178 if (unlikely(ret)) {
1179 pr_err("memory address set failed\n");
1180 return ret;
1181 }
1182 }
1183
1184 ret = ena_com_execute_admin_command(admin_queue,
1185 (struct ena_admin_aq_entry *)&create_cmd,
1186 sizeof(create_cmd),
1187 (struct ena_admin_acq_entry *)&cmd_completion,
1188 sizeof(cmd_completion));
1189 if (unlikely(ret)) {
1190 pr_err("Failed to create IO SQ. error: %d\n", ret);
1191 return ret;
1192 }
1193
1194 io_sq->idx = cmd_completion.sq_idx;
1195
1196 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1197 (uintptr_t)cmd_completion.sq_doorbell_offset);
1198
1199 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1200 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1201 + cmd_completion.llq_headers_offset);
1202
1203 io_sq->desc_addr.pbuf_dev_addr =
1204 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1205 cmd_completion.llq_descriptors_offset);
1206 }
1207
1208 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1209
1210 return ret;
1211 }
1212
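/* Translate the host RSS indirection table, which holds driver queue ids,
 * into the device-side queue indices the hardware expects.
 */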
1213 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1214 {
1215 struct ena_rss *rss = &ena_dev->rss;
1216 struct ena_com_io_sq *io_sq;
1217 u16 qid;
1218 int i;
1219
1220 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1221 qid = rss->host_rss_ind_tbl[i];
1222 if (qid >= ENA_TOTAL_NUM_QUEUES)
1223 return -EINVAL;
1224
1225 io_sq = &ena_dev->io_sq_queues[qid];
1226
1227 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1228 return -EINVAL;
1229
1230 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1231 }
1232
1233 return 0;
1234 }
1235
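/* Inverse of the conversion above: map the device queue indices in the RSS
 * indirection table back to host queue ids.
 */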
1236 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1237 {
1238 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1239 struct ena_rss *rss = &ena_dev->rss;
1240 u8 idx;
1241 u16 i;
1242
1243 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1244 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1245
1246 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1247 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1248 return -EINVAL;
1249 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1250
1251 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1252 return -EINVAL;
1253
1254 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1255 }
1256
1257 return 0;
1258 }
1259
1260 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1261 {
1262 size_t size;
1263
1264 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1265
1266 ena_dev->intr_moder_tbl =
1267 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1268 if (!ena_dev->intr_moder_tbl)
1269 return -ENOMEM;
1270
1271 ena_com_config_default_interrupt_moderation_table(ena_dev);
1272
1273 return 0;
1274 }
1275
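/* Divide the Rx interrupt moderation intervals and the Tx interval by the
 * device's interrupt delay resolution so they are expressed in device units.
 */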
1276 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1277 u16 intr_delay_resolution)
1278 {
1279 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1280 unsigned int i;
1281
1282 if (!intr_delay_resolution) {
1283 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1284 intr_delay_resolution = 1;
1285 }
1286 ena_dev->intr_delay_resolution = intr_delay_resolution;
1287
1288 /* update Rx */
1289 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1290 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1291
1292 /* update Tx */
1293 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1294 }
1295
1296 /*****************************************************************************/
1297 /******************************* API ******************************/
1298 /*****************************************************************************/
1299
1300 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1301 struct ena_admin_aq_entry *cmd,
1302 size_t cmd_size,
1303 struct ena_admin_acq_entry *comp,
1304 size_t comp_size)
1305 {
1306 struct ena_comp_ctx *comp_ctx;
1307 int ret;
1308
1309 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1310 comp, comp_size);
1311 if (IS_ERR(comp_ctx)) {
1312 if (comp_ctx == ERR_PTR(-ENODEV))
1313 pr_debug("Failed to submit command [%ld]\n",
1314 PTR_ERR(comp_ctx));
1315 else
1316 pr_err("Failed to submit command [%ld]\n",
1317 PTR_ERR(comp_ctx));
1318
1319 return PTR_ERR(comp_ctx);
1320 }
1321
1322 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1323 if (unlikely(ret)) {
1324 if (admin_queue->running_state)
1325 pr_err("Failed to process command. ret = %d\n", ret);
1326 else
1327 pr_debug("Failed to process command. ret = %d\n", ret);
1328 }
1329 return ret;
1330 }
1331
1332 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1333 struct ena_com_io_cq *io_cq)
1334 {
1335 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1336 struct ena_admin_aq_create_cq_cmd create_cmd;
1337 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1338 int ret;
1339
1340 memset(&create_cmd, 0x0, sizeof(create_cmd));
1341
1342 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1343
1344 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1345 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1346 create_cmd.cq_caps_1 |=
1347 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1348
1349 create_cmd.msix_vector = io_cq->msix_vector;
1350 create_cmd.cq_depth = io_cq->q_depth;
1351
1352 ret = ena_com_mem_addr_set(ena_dev,
1353 &create_cmd.cq_ba,
1354 io_cq->cdesc_addr.phys_addr);
1355 if (unlikely(ret)) {
1356 pr_err("memory address set failed\n");
1357 return ret;
1358 }
1359
1360 ret = ena_com_execute_admin_command(admin_queue,
1361 (struct ena_admin_aq_entry *)&create_cmd,
1362 sizeof(create_cmd),
1363 (struct ena_admin_acq_entry *)&cmd_completion,
1364 sizeof(cmd_completion));
1365 if (unlikely(ret)) {
1366 pr_err("Failed to create IO CQ. error: %d\n", ret);
1367 return ret;
1368 }
1369
1370 io_cq->idx = cmd_completion.cq_idx;
1371
1372 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1373 cmd_completion.cq_interrupt_unmask_register_offset);
1374
1375 if (cmd_completion.cq_head_db_register_offset)
1376 io_cq->cq_head_db_reg =
1377 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1378 cmd_completion.cq_head_db_register_offset);
1379
1380 if (cmd_completion.numa_node_register_offset)
1381 io_cq->numa_node_cfg_reg =
1382 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1383 cmd_completion.numa_node_register_offset);
1384
1385 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1386
1387 return ret;
1388 }
1389
1390 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1391 struct ena_com_io_sq **io_sq,
1392 struct ena_com_io_cq **io_cq)
1393 {
1394 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1395 pr_err("Invalid queue number %d but the max is %d\n", qid,
1396 ENA_TOTAL_NUM_QUEUES);
1397 return -EINVAL;
1398 }
1399
1400 *io_sq = &ena_dev->io_sq_queues[qid];
1401 *io_cq = &ena_dev->io_cq_queues[qid];
1402
1403 return 0;
1404 }
1405
1406 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1407 {
1408 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1409 struct ena_comp_ctx *comp_ctx;
1410 u16 i;
1411
1412 if (!admin_queue->comp_ctx)
1413 return;
1414
1415 for (i = 0; i < admin_queue->q_depth; i++) {
1416 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1417 if (unlikely(!comp_ctx))
1418 break;
1419
1420 comp_ctx->status = ENA_CMD_ABORTED;
1421
1422 complete(&comp_ctx->wait_event);
1423 }
1424 }
1425
1426 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1427 {
1428 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1429 unsigned long flags;
1430
1431 spin_lock_irqsave(&admin_queue->q_lock, flags);
1432 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1433 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1434 msleep(ENA_POLL_MS);
1435 spin_lock_irqsave(&admin_queue->q_lock, flags);
1436 }
1437 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1438 }
1439
1440 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1441 struct ena_com_io_cq *io_cq)
1442 {
1443 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1444 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1445 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1446 int ret;
1447
1448 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1449
1450 destroy_cmd.cq_idx = io_cq->idx;
1451 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1452
1453 ret = ena_com_execute_admin_command(admin_queue,
1454 (struct ena_admin_aq_entry *)&destroy_cmd,
1455 sizeof(destroy_cmd),
1456 (struct ena_admin_acq_entry *)&destroy_resp,
1457 sizeof(destroy_resp));
1458
1459 if (unlikely(ret && (ret != -ENODEV)))
1460 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1461
1462 return ret;
1463 }
1464
1465 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1466 {
1467 return ena_dev->admin_queue.running_state;
1468 }
1469
1470 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1471 {
1472 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1473 unsigned long flags;
1474
1475 spin_lock_irqsave(&admin_queue->q_lock, flags);
1476 ena_dev->admin_queue.running_state = state;
1477 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1478 }
1479
1480 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1481 {
1482 u16 depth = ena_dev->aenq.q_depth;
1483
1484 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1485
1486 /* Init head_db to mark that all entries in the queue
1487 * are initially available
1488 */
1489 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1490 }
1491
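/* Enable the requested AENQ event groups after verifying the device
 * supports them.
 */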
1492 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1493 {
1494 struct ena_com_admin_queue *admin_queue;
1495 struct ena_admin_set_feat_cmd cmd;
1496 struct ena_admin_set_feat_resp resp;
1497 struct ena_admin_get_feat_resp get_resp;
1498 int ret;
1499
1500 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1501 if (ret) {
1502 pr_info("Can't get aenq configuration\n");
1503 return ret;
1504 }
1505
1506 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1507 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1508 get_resp.u.aenq.supported_groups, groups_flag);
1509 return -EOPNOTSUPP;
1510 }
1511
1512 memset(&cmd, 0x0, sizeof(cmd));
1513 admin_queue = &ena_dev->admin_queue;
1514
1515 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1516 cmd.aq_common_descriptor.flags = 0;
1517 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1518 cmd.u.aenq.enabled_groups = groups_flag;
1519
1520 ret = ena_com_execute_admin_command(admin_queue,
1521 (struct ena_admin_aq_entry *)&cmd,
1522 sizeof(cmd),
1523 (struct ena_admin_acq_entry *)&resp,
1524 sizeof(resp));
1525
1526 if (unlikely(ret))
1527 pr_err("Failed to config AENQ ret: %d\n", ret);
1528
1529 return ret;
1530 }
1531
1532 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1533 {
1534 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1535 int width;
1536
1537 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1538 pr_err("Reg read timeout occurred\n");
1539 return -ETIME;
1540 }
1541
1542 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1543 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1544
1545 pr_debug("ENA dma width: %d\n", width);
1546
1547 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1548 pr_err("DMA width illegal value: %d\n", width);
1549 return -EINVAL;
1550 }
1551
1552 ena_dev->dma_addr_bits = width;
1553
1554 return width;
1555 }
1556
1557 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1558 {
1559 u32 ver;
1560 u32 ctrl_ver;
1561 u32 ctrl_ver_masked;
1562
1563 /* Make sure the ENA version and the controller version are at least
1564 * as new as the driver expects
1565 */
1566 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1567 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1568 ENA_REGS_CONTROLLER_VERSION_OFF);
1569
1570 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1571 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1572 pr_err("Reg read timeout occurred\n");
1573 return -ETIME;
1574 }
1575
1576 pr_info("ena device version: %d.%d\n",
1577 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1578 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1579 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1580
1581 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1582 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1583 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1584 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1585 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1586 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1587 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1588 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1589
1590 ctrl_ver_masked =
1591 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1592 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1593 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1594
1595 /* Validate the ctrl version without the implementation ID */
1596 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1597 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1598 return -1;
1599 }
1600
1601 return 0;
1602 }
1603
1604 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1605 {
1606 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1607 struct ena_com_admin_cq *cq = &admin_queue->cq;
1608 struct ena_com_admin_sq *sq = &admin_queue->sq;
1609 struct ena_com_aenq *aenq = &ena_dev->aenq;
1610 u16 size;
1611
1612 if (admin_queue->comp_ctx)
1613 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1614 admin_queue->comp_ctx = NULL;
1615 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1616 if (sq->entries)
1617 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1618 sq->dma_addr);
1619 sq->entries = NULL;
1620
1621 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1622 if (cq->entries)
1623 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1624 cq->dma_addr);
1625 cq->entries = NULL;
1626
1627 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1628 if (ena_dev->aenq.entries)
1629 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1630 aenq->dma_addr);
1631 aenq->entries = NULL;
1632 }
1633
1634 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1635 {
1636 u32 mask_value = 0;
1637
1638 if (polling)
1639 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1640
1641 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1642 ena_dev->admin_queue.polling = polling;
1643 }
1644
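/* Allocate the DMA-coherent response buffer used for readless register reads
 * and report its address to the device.
 */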
1645 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1646 {
1647 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1648
1649 spin_lock_init(&mmio_read->lock);
1650 mmio_read->read_resp =
1651 dma_zalloc_coherent(ena_dev->dmadev,
1652 sizeof(*mmio_read->read_resp),
1653 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1654 if (unlikely(!mmio_read->read_resp))
1655 return -ENOMEM;
1656
1657 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1658
1659 mmio_read->read_resp->req_id = 0x0;
1660 mmio_read->seq_num = 0x0;
1661 mmio_read->readless_supported = true;
1662
1663 return 0;
1664 }
1665
1666 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1667 {
1668 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1669
1670 mmio_read->readless_supported = readless_supported;
1671 }
1672
1673 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1674 {
1675 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1676
1677 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1678 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1679
1680 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1681 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1682
1683 mmio_read->read_resp = NULL;
1684 }
1685
1686 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1687 {
1688 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1689 u32 addr_low, addr_high;
1690
1691 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1692 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1693
1694 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1695 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1696 }
1697
1698 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1699 struct ena_aenq_handlers *aenq_handlers,
1700 bool init_spinlock)
1701 {
1702 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1703 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1704 int ret;
1705
1706 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1707
1708 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1709 pr_err("Reg read timeout occurred\n");
1710 return -ETIME;
1711 }
1712
1713 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1714 pr_err("Device isn't ready, abort com init\n");
1715 return -ENODEV;
1716 }
1717
1718 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1719
1720 admin_queue->q_dmadev = ena_dev->dmadev;
1721 admin_queue->polling = false;
1722 admin_queue->curr_cmd_id = 0;
1723
1724 atomic_set(&admin_queue->outstanding_cmds, 0);
1725
1726 if (init_spinlock)
1727 spin_lock_init(&admin_queue->q_lock);
1728
1729 ret = ena_com_init_comp_ctxt(admin_queue);
1730 if (ret)
1731 goto error;
1732
1733 ret = ena_com_admin_init_sq(admin_queue);
1734 if (ret)
1735 goto error;
1736
1737 ret = ena_com_admin_init_cq(admin_queue);
1738 if (ret)
1739 goto error;
1740
1741 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1742 ENA_REGS_AQ_DB_OFF);
1743
1744 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1745 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1746
1747 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1748 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1749
1750 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1751 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1752
1753 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1754 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1755
1756 aq_caps = 0;
1757 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1758 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1759 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1760 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1761
1762 acq_caps = 0;
1763 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1764 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1765 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1766 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1767
1768 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1769 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1770 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1771 if (ret)
1772 goto error;
1773
1774 admin_queue->running_state = true;
1775
1776 return 0;
1777 error:
1778 ena_com_admin_destroy(ena_dev);
1779
1780 return ret;
1781 }
1782
1783 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1784 struct ena_com_create_io_ctx *ctx)
1785 {
1786 struct ena_com_io_sq *io_sq;
1787 struct ena_com_io_cq *io_cq;
1788 int ret;
1789
1790 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1791 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1792 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1793 return -EINVAL;
1794 }
1795
1796 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1797 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1798
1799 memset(io_sq, 0x0, sizeof(*io_sq));
1800 memset(io_cq, 0x0, sizeof(*io_cq));
1801
1802 /* Init CQ */
1803 io_cq->q_depth = ctx->queue_size;
1804 io_cq->direction = ctx->direction;
1805 io_cq->qid = ctx->qid;
1806
1807 io_cq->msix_vector = ctx->msix_vector;
1808
1809 io_sq->q_depth = ctx->queue_size;
1810 io_sq->direction = ctx->direction;
1811 io_sq->qid = ctx->qid;
1812
1813 io_sq->mem_queue_type = ctx->mem_queue_type;
1814
1815 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1816 /* header length is limited to 8 bits */
1817 io_sq->tx_max_header_size =
1818 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1819
1820 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1821 if (ret)
1822 goto error;
1823 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1824 if (ret)
1825 goto error;
1826
1827 ret = ena_com_create_io_cq(ena_dev, io_cq);
1828 if (ret)
1829 goto error;
1830
1831 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1832 if (ret)
1833 goto destroy_io_cq;
1834
1835 return 0;
1836
1837 destroy_io_cq:
1838 ena_com_destroy_io_cq(ena_dev, io_cq);
1839 error:
1840 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1841 return ret;
1842 }
1843
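/* ena_com_destroy_io_queue:
 * destroy the device-side IO SQ/CQ of the given qid and free the
 * host-side resources.
 */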
1844 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1845 {
1846 struct ena_com_io_sq *io_sq;
1847 struct ena_com_io_cq *io_cq;
1848
1849 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1850 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1851 ENA_TOTAL_NUM_QUEUES);
1852 return;
1853 }
1854
1855 io_sq = &ena_dev->io_sq_queues[qid];
1856 io_cq = &ena_dev->io_cq_queues[qid];
1857
1858 ena_com_destroy_io_sq(ena_dev, io_sq);
1859 ena_com_destroy_io_cq(ena_dev, io_cq);
1860
1861 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1862 }
1863
1864 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1865 struct ena_admin_get_feat_resp *resp)
1866 {
1867 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1868 }
1869
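/* ena_com_get_dev_attr_feat:
 * query the device attributes, max queues, AENQ and stateless offload
 * features. HW hints and LLQ are optional; if the device doesn't support
 * them, the corresponding fields are zeroed instead of failing.
 */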
1870 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1871 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1872 {
1873 struct ena_admin_get_feat_resp get_resp;
1874 int rc;
1875
1876 rc = ena_com_get_feature(ena_dev, &get_resp,
1877 ENA_ADMIN_DEVICE_ATTRIBUTES);
1878 if (rc)
1879 return rc;
1880
1881 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1882 sizeof(get_resp.u.dev_attr));
1883 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1884
1885 rc = ena_com_get_feature(ena_dev, &get_resp,
1886 ENA_ADMIN_MAX_QUEUES_NUM);
1887 if (rc)
1888 return rc;
1889
1890 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1891 sizeof(get_resp.u.max_queue));
1892 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1893
1894 rc = ena_com_get_feature(ena_dev, &get_resp,
1895 ENA_ADMIN_AENQ_CONFIG);
1896 if (rc)
1897 return rc;
1898
1899 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1900 sizeof(get_resp.u.aenq));
1901
1902 rc = ena_com_get_feature(ena_dev, &get_resp,
1903 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1904 if (rc)
1905 return rc;
1906
1907 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1908 sizeof(get_resp.u.offload));
1909
1910 /* Driver hints isn't a mandatory admin command, so if the
1911 * command isn't supported, set driver hints to 0
1912 */
1913 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1914
1915 if (!rc)
1916 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1917 sizeof(get_resp.u.hw_hints));
1918 else if (rc == -EOPNOTSUPP)
1919 memset(&get_feat_ctx->hw_hints, 0x0,
1920 sizeof(get_feat_ctx->hw_hints));
1921 else
1922 return rc;
1923
1924 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
1925 if (!rc)
1926 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1927 sizeof(get_resp.u.llq));
1928 else if (rc == -EOPNOTSUPP)
1929 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1930 else
1931 return rc;
1932
1933 return 0;
1934 }
1935
1936 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1937 {
1938 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1939 }
1940
1941 /* ena_handle_specific_aenq_event:
1942 * return the handler that is relevant to the specific event group
1943 */
1944 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1945 u16 group)
1946 {
1947 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1948
1949 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1950 return aenq_handlers->handlers[group];
1951
1952 return aenq_handlers->unimplemented_handler;
1953 }
1954
1955 /* ena_aenq_intr_handler:
1956 * handles the aenq incoming events.
1957 * pop events from the queue and apply the specific handler
1958 */
1959 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1960 {
1961 struct ena_admin_aenq_entry *aenq_e;
1962 struct ena_admin_aenq_common_desc *aenq_common;
1963 struct ena_com_aenq *aenq = &dev->aenq;
1964 ena_aenq_handler handler_cb;
1965 u16 masked_head, processed = 0;
1966 u8 phase;
1967
1968 masked_head = aenq->head & (aenq->q_depth - 1);
1969 phase = aenq->phase;
1970 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1971 aenq_common = &aenq_e->aenq_common_desc;
1972
1973 /* Go over all the events */
1974 while ((READ_ONCE(aenq_common->flags) &
1975 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1976 /* Make sure the phase bit (ownership) is as expected before
1977 * reading the rest of the descriptor.
1978 */
1979 dma_rmb();
1980
1981 pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
1982 aenq_common->group, aenq_common->syndrom,
1983 (u64)aenq_common->timestamp_low +
1984 ((u64)aenq_common->timestamp_high << 32));
1985
1986 /* Handle specific event */
1987 handler_cb = ena_com_get_specific_aenq_cb(dev,
1988 aenq_common->group);
1989 handler_cb(data, aenq_e); /* call the actual event handler */
1990
1991 /* Get next event entry */
1992 masked_head++;
1993 processed++;
1994
1995 if (unlikely(masked_head == aenq->q_depth)) {
1996 masked_head = 0;
1997 phase = !phase;
1998 }
1999 aenq_e = &aenq->entries[masked_head];
2000 aenq_common = &aenq_e->aenq_common_desc;
2001 }
2002
2003 aenq->head += processed;
2004 aenq->phase = phase;
2005
2006 /* Don't update aenq doorbell if there weren't any processed events */
2007 if (!processed)
2008 return;
2009
2010 /* write the aenq doorbell after all AENQ descriptors were read */
2011 mb();
2012 writel_relaxed((u32)aenq->head,
2013 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2014 mmiowb();
2015 }
2016
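/* ena_com_dev_reset:
 * trigger a device reset with the given reason, wait for the reset
 * indication to assert and then deassert, and update the admin command
 * completion timeout from the capabilities register.
 */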
2017 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2018 enum ena_regs_reset_reason_types reset_reason)
2019 {
2020 u32 stat, timeout, cap, reset_val;
2021 int rc;
2022
2023 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2024 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2025
2026 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2027 (cap == ENA_MMIO_READ_TIMEOUT))) {
2028 pr_err("Reg read32 timeout occurred\n");
2029 return -ETIME;
2030 }
2031
2032 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2033 pr_err("Device isn't ready, can't reset device\n");
2034 return -EINVAL;
2035 }
2036
2037 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2038 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2039 if (timeout == 0) {
2040 pr_err("Invalid timeout value\n");
2041 return -EINVAL;
2042 }
2043
2044 /* start reset */
2045 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2046 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2047 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2048 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2049
2050 /* Write the MMIO read request address to the device again */
2051 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2052
2053 rc = wait_for_reset_state(ena_dev, timeout,
2054 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2055 if (rc != 0) {
2056 pr_err("Reset indication didn't turn on\n");
2057 return rc;
2058 }
2059
2060 /* reset done */
2061 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2062 rc = wait_for_reset_state(ena_dev, timeout, 0);
2063 if (rc != 0) {
2064 pr_err("Reset indication didn't turn off\n");
2065 return rc;
2066 }
2067
2068 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2069 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2070 if (timeout)
2071 /* the resolution of timeout reg is 100ms */
2072 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2073 else
2074 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2075
2076 return 0;
2077 }
2078
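/* ena_get_dev_stats:
 * issue an ENA_ADMIN_GET_STATS admin command of the requested type and
 * return the response in the caller-supplied context.
 */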
2079 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2080 struct ena_com_stats_ctx *ctx,
2081 enum ena_admin_get_stats_type type)
2082 {
2083 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2084 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2085 struct ena_com_admin_queue *admin_queue;
2086 int ret;
2087
2088 admin_queue = &ena_dev->admin_queue;
2089
2090 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2091 get_cmd->aq_common_descriptor.flags = 0;
2092 get_cmd->type = type;
2093
2094 ret = ena_com_execute_admin_command(admin_queue,
2095 (struct ena_admin_aq_entry *)get_cmd,
2096 sizeof(*get_cmd),
2097 (struct ena_admin_acq_entry *)get_resp,
2098 sizeof(*get_resp));
2099
2100 if (unlikely(ret))
2101 pr_err("Failed to get stats. error: %d\n", ret);
2102
2103 return ret;
2104 }
2105
2106 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2107 struct ena_admin_basic_stats *stats)
2108 {
2109 struct ena_com_stats_ctx ctx;
2110 int ret;
2111
2112 memset(&ctx, 0x0, sizeof(ctx));
2113 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2114 if (likely(ret == 0))
2115 memcpy(stats, &ctx.get_resp.basic_stats,
2116 sizeof(ctx.get_resp.basic_stats));
2117
2118 return ret;
2119 }
2120
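/* ena_com_set_dev_mtu:
 * notify the device about the new MTU via the MTU set-feature command.
 */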
2121 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2122 {
2123 struct ena_com_admin_queue *admin_queue;
2124 struct ena_admin_set_feat_cmd cmd;
2125 struct ena_admin_set_feat_resp resp;
2126 int ret;
2127
2128 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2129 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2130 return -EOPNOTSUPP;
2131 }
2132
2133 memset(&cmd, 0x0, sizeof(cmd));
2134 admin_queue = &ena_dev->admin_queue;
2135
2136 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2137 cmd.aq_common_descriptor.flags = 0;
2138 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2139 cmd.u.mtu.mtu = mtu;
2140
2141 ret = ena_com_execute_admin_command(admin_queue,
2142 (struct ena_admin_aq_entry *)&cmd,
2143 sizeof(cmd),
2144 (struct ena_admin_acq_entry *)&resp,
2145 sizeof(resp));
2146
2147 if (unlikely(ret))
2148 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2149
2150 return ret;
2151 }
2152
2153 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2154 struct ena_admin_feature_offload_desc *offload)
2155 {
2156 int ret;
2157 struct ena_admin_get_feat_resp resp;
2158
2159 ret = ena_com_get_feature(ena_dev, &resp,
2160 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
2161 if (unlikely(ret)) {
2162 pr_err("Failed to get offload capabilities %d\n", ret);
2163 return ret;
2164 }
2165
2166 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2167
2168 return 0;
2169 }
2170
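/* ena_com_set_hash_function:
 * program the currently configured RSS hash function and key (passed by
 * DMA address) to the device, after validating the device supports it.
 */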
2171 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2172 {
2173 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2174 struct ena_rss *rss = &ena_dev->rss;
2175 struct ena_admin_set_feat_cmd cmd;
2176 struct ena_admin_set_feat_resp resp;
2177 struct ena_admin_get_feat_resp get_resp;
2178 int ret;
2179
2180 if (!ena_com_check_supported_feature_id(ena_dev,
2181 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2182 pr_debug("Feature %d isn't supported\n",
2183 ENA_ADMIN_RSS_HASH_FUNCTION);
2184 return -EOPNOTSUPP;
2185 }
2186
2187 /* Validate hash function is supported */
2188 ret = ena_com_get_feature(ena_dev, &get_resp,
2189 ENA_ADMIN_RSS_HASH_FUNCTION);
2190 if (unlikely(ret))
2191 return ret;
2192
2193 if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2194 pr_err("Func hash %d isn't supported by device, abort\n",
2195 rss->hash_func);
2196 return -EOPNOTSUPP;
2197 }
2198
2199 memset(&cmd, 0x0, sizeof(cmd));
2200
2201 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2202 cmd.aq_common_descriptor.flags =
2203 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2204 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2205 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2206 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2207
2208 ret = ena_com_mem_addr_set(ena_dev,
2209 &cmd.control_buffer.address,
2210 rss->hash_key_dma_addr);
2211 if (unlikely(ret)) {
2212 pr_err("memory address set failed\n");
2213 return ret;
2214 }
2215
2216 cmd.control_buffer.length = sizeof(*rss->hash_key);
2217
2218 ret = ena_com_execute_admin_command(admin_queue,
2219 (struct ena_admin_aq_entry *)&cmd,
2220 sizeof(cmd),
2221 (struct ena_admin_acq_entry *)&resp,
2222 sizeof(resp));
2223 if (unlikely(ret)) {
2224 pr_err("Failed to set hash function %d. error: %d\n",
2225 rss->hash_func, ret);
2226 return -EINVAL;
2227 }
2228
2229 return 0;
2230 }
2231
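/* ena_com_fill_hash_function:
 * store the requested hash function, key and initial value in the host
 * RSS context and push the configuration to the device; on failure the
 * previous function is restored.
 */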
2232 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2233 enum ena_admin_hash_functions func,
2234 const u8 *key, u16 key_len, u32 init_val)
2235 {
2236 struct ena_rss *rss = &ena_dev->rss;
2237 struct ena_admin_get_feat_resp get_resp;
2238 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2239 rss->hash_key;
2240 int rc;
2241
2242 /* Make sure the key size is a multiple of DWORDs */
2243 if (unlikely(key_len & 0x3))
2244 return -EINVAL;
2245
2246 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2247 ENA_ADMIN_RSS_HASH_FUNCTION,
2248 rss->hash_key_dma_addr,
2249 sizeof(*rss->hash_key));
2250 if (unlikely(rc))
2251 return rc;
2252
2253 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2254 pr_err("Flow hash function %d isn't supported\n", func);
2255 return -EOPNOTSUPP;
2256 }
2257
2258 switch (func) {
2259 case ENA_ADMIN_TOEPLITZ:
2260 if (key_len > sizeof(hash_key->key)) {
2261 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2262 key_len, sizeof(hash_key->key));
2263 return -EINVAL;
2264 }
2265
2266 memcpy(hash_key->key, key, key_len);
2267 rss->hash_init_val = init_val;
2268 hash_key->keys_num = key_len >> 2;
2269 break;
2270 case ENA_ADMIN_CRC32:
2271 rss->hash_init_val = init_val;
2272 break;
2273 default:
2274 pr_err("Invalid hash function (%d)\n", func);
2275 return -EINVAL;
2276 }
2277
2278 rc = ena_com_set_hash_function(ena_dev);
2279
2280 /* Restore the old function */
2281 if (unlikely(rc))
2282 ena_com_get_hash_function(ena_dev, NULL, NULL);
2283
2284 return rc;
2285 }
2286
2287 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2288 enum ena_admin_hash_functions *func,
2289 u8 *key)
2290 {
2291 struct ena_rss *rss = &ena_dev->rss;
2292 struct ena_admin_get_feat_resp get_resp;
2293 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2294 rss->hash_key;
2295 int rc;
2296
2297 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2298 ENA_ADMIN_RSS_HASH_FUNCTION,
2299 rss->hash_key_dma_addr,
2300 sizeof(*rss->hash_key));
2301 if (unlikely(rc))
2302 return rc;
2303
2304 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2305 if (func)
2306 *func = rss->hash_func;
2307
2308 if (key)
2309 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2310
2311 return 0;
2312 }
2313
2314 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2315 enum ena_admin_flow_hash_proto proto,
2316 u16 *fields)
2317 {
2318 struct ena_rss *rss = &ena_dev->rss;
2319 struct ena_admin_get_feat_resp get_resp;
2320 int rc;
2321
2322 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2323 ENA_ADMIN_RSS_HASH_INPUT,
2324 rss->hash_ctrl_dma_addr,
2325 sizeof(*rss->hash_ctrl));
2326 if (unlikely(rc))
2327 return rc;
2328
2329 if (fields)
2330 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2331
2332 return 0;
2333 }
2334
2335 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2336 {
2337 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2338 struct ena_rss *rss = &ena_dev->rss;
2339 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2340 struct ena_admin_set_feat_cmd cmd;
2341 struct ena_admin_set_feat_resp resp;
2342 int ret;
2343
2344 if (!ena_com_check_supported_feature_id(ena_dev,
2345 ENA_ADMIN_RSS_HASH_INPUT)) {
2346 pr_debug("Feature %d isn't supported\n",
2347 ENA_ADMIN_RSS_HASH_INPUT);
2348 return -EOPNOTSUPP;
2349 }
2350
2351 memset(&cmd, 0x0, sizeof(cmd));
2352
2353 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2354 cmd.aq_common_descriptor.flags =
2355 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2356 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2357 cmd.u.flow_hash_input.enabled_input_sort =
2358 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2359 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2360
2361 ret = ena_com_mem_addr_set(ena_dev,
2362 &cmd.control_buffer.address,
2363 rss->hash_ctrl_dma_addr);
2364 if (unlikely(ret)) {
2365 pr_err("memory address set failed\n");
2366 return ret;
2367 }
2368 cmd.control_buffer.length = sizeof(*hash_ctrl);
2369
2370 ret = ena_com_execute_admin_command(admin_queue,
2371 (struct ena_admin_aq_entry *)&cmd,
2372 sizeof(cmd),
2373 (struct ena_admin_acq_entry *)&resp,
2374 sizeof(resp));
2375 if (unlikely(ret))
2376 pr_err("Failed to set hash input. error: %d\n", ret);
2377
2378 return ret;
2379 }
2380
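/* ena_com_set_default_hash_ctrl:
 * fill the hash control table with the default input fields per protocol
 * (L3/L4 addresses and ports where applicable), verify they are all
 * supported by the device, and program the table.
 */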
2381 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2382 {
2383 struct ena_rss *rss = &ena_dev->rss;
2384 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2385 rss->hash_ctrl;
2386 u16 available_fields = 0;
2387 int rc, i;
2388
2389 /* Get the supported hash input */
2390 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2391 if (unlikely(rc))
2392 return rc;
2393
2394 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2395 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2396 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2397
2398 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2399 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2400 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2401
2402 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2403 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2404 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2405
2406 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2407 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2408 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2409
2410 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2411 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2412
2413 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2414 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2415
2416 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2417 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2418
2419 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2420 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2421
2422 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2423 available_fields = hash_ctrl->selected_fields[i].fields &
2424 hash_ctrl->supported_fields[i].fields;
2425 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2426 pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2427 i, hash_ctrl->supported_fields[i].fields,
2428 hash_ctrl->selected_fields[i].fields);
2429 return -EOPNOTSUPP;
2430 }
2431 }
2432
2433 rc = ena_com_set_hash_ctrl(ena_dev);
2434
2435 /* In case of failure, restore the old hash ctrl */
2436 if (unlikely(rc))
2437 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2438
2439 return rc;
2440 }
2441
2442 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2443 enum ena_admin_flow_hash_proto proto,
2444 u16 hash_fields)
2445 {
2446 struct ena_rss *rss = &ena_dev->rss;
2447 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2448 u16 supported_fields;
2449 int rc;
2450
2451 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2452 pr_err("Invalid proto num (%u)\n", proto);
2453 return -EINVAL;
2454 }
2455
2456 /* Get the ctrl table */
2457 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2458 if (unlikely(rc))
2459 return rc;
2460
2461 /* Make sure all the fields are supported */
2462 supported_fields = hash_ctrl->supported_fields[proto].fields;
2463 if ((hash_fields & supported_fields) != hash_fields) {
2464 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2465 proto, hash_fields, supported_fields);
2466 }
2467
2468 hash_ctrl->selected_fields[proto].fields = hash_fields;
2469
2470 rc = ena_com_set_hash_ctrl(ena_dev);
2471
2472 /* In case of failure, restore the old hash ctrl */
2473 if (unlikely(rc))
2474 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2475
2476 return rc;
2477 }
2478
2479 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2480 u16 entry_idx, u16 entry_value)
2481 {
2482 struct ena_rss *rss = &ena_dev->rss;
2483
2484 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2485 return -EINVAL;
2486
2487 if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2488 return -EINVAL;
2489
2490 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2491
2492 return 0;
2493 }
2494
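/* ena_com_indirect_table_set:
 * convert the host RSS indirection table to the device format and program
 * it via the redirection-table set-feature command.
 */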
2495 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2496 {
2497 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2498 struct ena_rss *rss = &ena_dev->rss;
2499 struct ena_admin_set_feat_cmd cmd;
2500 struct ena_admin_set_feat_resp resp;
2501 int ret;
2502
2503 if (!ena_com_check_supported_feature_id(
2504 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2505 pr_debug("Feature %d isn't supported\n",
2506 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2507 return -EOPNOTSUPP;
2508 }
2509
2510 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2511 if (ret) {
2512 pr_err("Failed to convert host indirection table to device table\n");
2513 return ret;
2514 }
2515
2516 memset(&cmd, 0x0, sizeof(cmd));
2517
2518 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2519 cmd.aq_common_descriptor.flags =
2520 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2521 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2522 cmd.u.ind_table.size = rss->tbl_log_size;
2523 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2524
2525 ret = ena_com_mem_addr_set(ena_dev,
2526 &cmd.control_buffer.address,
2527 rss->rss_ind_tbl_dma_addr);
2528 if (unlikely(ret)) {
2529 pr_err("memory address set failed\n");
2530 return ret;
2531 }
2532
2533 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2534 sizeof(struct ena_admin_rss_ind_table_entry);
2535
2536 ret = ena_com_execute_admin_command(admin_queue,
2537 (struct ena_admin_aq_entry *)&cmd,
2538 sizeof(cmd),
2539 (struct ena_admin_acq_entry *)&resp,
2540 sizeof(resp));
2541
2542 if (unlikely(ret))
2543 pr_err("Failed to set indirect table. error: %d\n", ret);
2544
2545 return ret;
2546 }
2547
2548 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2549 {
2550 struct ena_rss *rss = &ena_dev->rss;
2551 struct ena_admin_get_feat_resp get_resp;
2552 u32 tbl_size;
2553 int i, rc;
2554
2555 tbl_size = (1ULL << rss->tbl_log_size) *
2556 sizeof(struct ena_admin_rss_ind_table_entry);
2557
2558 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2559 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2560 rss->rss_ind_tbl_dma_addr,
2561 tbl_size);
2562 if (unlikely(rc))
2563 return rc;
2564
2565 if (!ind_tbl)
2566 return 0;
2567
2568 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2569 if (unlikely(rc))
2570 return rc;
2571
2572 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2573 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2574
2575 return 0;
2576 }
2577
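/* ena_com_rss_init:
 * allocate the RSS indirection table, hash key and hash control
 * structures; on failure, everything allocated so far is released.
 */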
2578 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2579 {
2580 int rc;
2581
2582 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2583
2584 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2585 if (unlikely(rc))
2586 goto err_indr_tbl;
2587
2588 rc = ena_com_hash_key_allocate(ena_dev);
2589 if (unlikely(rc))
2590 goto err_hash_key;
2591
2592 rc = ena_com_hash_ctrl_init(ena_dev);
2593 if (unlikely(rc))
2594 goto err_hash_ctrl;
2595
2596 return 0;
2597
2598 err_hash_ctrl:
2599 ena_com_hash_key_destroy(ena_dev);
2600 err_hash_key:
2601 ena_com_indirect_table_destroy(ena_dev);
2602 err_indr_tbl:
2603
2604 return rc;
2605 }
2606
2607 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2608 {
2609 ena_com_indirect_table_destroy(ena_dev);
2610 ena_com_hash_key_destroy(ena_dev);
2611 ena_com_hash_ctrl_destroy(ena_dev);
2612
2613 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2614 }
2615
2616 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2617 {
2618 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2619
2620 host_attr->host_info =
2621 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2622 &host_attr->host_info_dma_addr, GFP_KERNEL);
2623 if (unlikely(!host_attr->host_info))
2624 return -ENOMEM;
2625
2626 host_attr->host_info->ena_spec_version =
2627 ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2628 (ENA_COMMON_SPEC_VERSION_MINOR));
2629
2630 return 0;
2631 }
2632
2633 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2634 u32 debug_area_size)
2635 {
2636 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2637
2638 host_attr->debug_area_virt_addr =
2639 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2640 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2641 if (unlikely(!host_attr->debug_area_virt_addr)) {
2642 host_attr->debug_area_size = 0;
2643 return -ENOMEM;
2644 }
2645
2646 host_attr->debug_area_size = debug_area_size;
2647
2648 return 0;
2649 }
2650
2651 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2652 {
2653 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2654
2655 if (host_attr->host_info) {
2656 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2657 host_attr->host_info_dma_addr);
2658 host_attr->host_info = NULL;
2659 }
2660 }
2661
2662 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2663 {
2664 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2665
2666 if (host_attr->debug_area_virt_addr) {
2667 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2668 host_attr->debug_area_virt_addr,
2669 host_attr->debug_area_dma_addr);
2670 host_attr->debug_area_virt_addr = NULL;
2671 }
2672 }
2673
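/* ena_com_set_host_attributes:
 * pass the host info and debug area DMA addresses (and the debug area
 * size) to the device via the host attributes set-feature command.
 */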
2674 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2675 {
2676 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2677 struct ena_com_admin_queue *admin_queue;
2678 struct ena_admin_set_feat_cmd cmd;
2679 struct ena_admin_set_feat_resp resp;
2680
2681 int ret;
2682
2683 /* Host attribute config is called before ena_com_get_dev_attr_feat
2684 * so ena_com can't check if the feature is supported.
2685 */
2686
2687 memset(&cmd, 0x0, sizeof(cmd));
2688 admin_queue = &ena_dev->admin_queue;
2689
2690 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2691 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2692
2693 ret = ena_com_mem_addr_set(ena_dev,
2694 &cmd.u.host_attr.debug_ba,
2695 host_attr->debug_area_dma_addr);
2696 if (unlikely(ret)) {
2697 pr_err("memory address set failed\n");
2698 return ret;
2699 }
2700
2701 ret = ena_com_mem_addr_set(ena_dev,
2702 &cmd.u.host_attr.os_info_ba,
2703 host_attr->host_info_dma_addr);
2704 if (unlikely(ret)) {
2705 pr_err("memory address set failed\n");
2706 return ret;
2707 }
2708
2709 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2710
2711 ret = ena_com_execute_admin_command(admin_queue,
2712 (struct ena_admin_aq_entry *)&cmd,
2713 sizeof(cmd),
2714 (struct ena_admin_acq_entry *)&resp,
2715 sizeof(resp));
2716
2717 if (unlikely(ret))
2718 pr_err("Failed to set host attributes: %d\n", ret);
2719
2720 return ret;
2721 }
2722
2723 /* Interrupt moderation */
2724 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2725 {
2726 return ena_com_check_supported_feature_id(ena_dev,
2727 ENA_ADMIN_INTERRUPT_MODERATION);
2728 }
2729
2730 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2731 u32 tx_coalesce_usecs)
2732 {
2733 if (!ena_dev->intr_delay_resolution) {
2734 pr_err("Illegal interrupt delay granularity value\n");
2735 return -EFAULT;
2736 }
2737
2738 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2739 ena_dev->intr_delay_resolution;
2740
2741 return 0;
2742 }
2743
2744 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2745 u32 rx_coalesce_usecs)
2746 {
2747 if (!ena_dev->intr_delay_resolution) {
2748 pr_err("Illegal interrupt delay granularity value\n");
2749 return -EFAULT;
2750 }
2751
2752 /* We use LOWEST entry of moderation table for storing
2753 * nonadaptive interrupt coalescing values
2754 */
2755 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2756 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2757
2758 return 0;
2759 }
2760
2761 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2762 {
2763 if (ena_dev->intr_moder_tbl)
2764 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2765 ena_dev->intr_moder_tbl = NULL;
2766 }
2767
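/* ena_com_init_interrupt_moderation:
 * query the device interrupt moderation capability; if supported, allocate
 * the moderation table, set the delay resolution and enable adaptive
 * moderation, otherwise disable adaptive moderation.
 */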
2768 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2769 {
2770 struct ena_admin_get_feat_resp get_resp;
2771 u16 delay_resolution;
2772 int rc;
2773
2774 rc = ena_com_get_feature(ena_dev, &get_resp,
2775 ENA_ADMIN_INTERRUPT_MODERATION);
2776
2777 if (rc) {
2778 if (rc == -EOPNOTSUPP) {
2779 pr_debug("Feature %d isn't supported\n",
2780 ENA_ADMIN_INTERRUPT_MODERATION);
2781 rc = 0;
2782 } else {
2783 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2784 rc);
2785 }
2786
2787 /* no moderation supported, disable adaptive support */
2788 ena_com_disable_adaptive_moderation(ena_dev);
2789 return rc;
2790 }
2791
2792 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2793 if (rc)
2794 goto err;
2795
2796 /* if moderation is supported by device we set adaptive moderation */
2797 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2798 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2799 ena_com_enable_adaptive_moderation(ena_dev);
2800
2801 return 0;
2802 err:
2803 ena_com_destroy_interrupt_moderation(ena_dev);
2804 return rc;
2805 }
2806
2807 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2808 {
2809 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2810
2811 if (!intr_moder_tbl)
2812 return;
2813
2814 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2815 ENA_INTR_LOWEST_USECS;
2816 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2817 ENA_INTR_LOWEST_PKTS;
2818 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2819 ENA_INTR_LOWEST_BYTES;
2820
2821 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2822 ENA_INTR_LOW_USECS;
2823 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2824 ENA_INTR_LOW_PKTS;
2825 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2826 ENA_INTR_LOW_BYTES;
2827
2828 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2829 ENA_INTR_MID_USECS;
2830 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2831 ENA_INTR_MID_PKTS;
2832 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2833 ENA_INTR_MID_BYTES;
2834
2835 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2836 ENA_INTR_HIGH_USECS;
2837 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2838 ENA_INTR_HIGH_PKTS;
2839 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2840 ENA_INTR_HIGH_BYTES;
2841
2842 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2843 ENA_INTR_HIGHEST_USECS;
2844 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2845 ENA_INTR_HIGHEST_PKTS;
2846 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2847 ENA_INTR_HIGHEST_BYTES;
2848 }
2849
2850 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2851 {
2852 return ena_dev->intr_moder_tx_interval;
2853 }
2854
2855 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2856 {
2857 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2858
2859 if (intr_moder_tbl)
2860 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2861
2862 return 0;
2863 }
2864
2865 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2866 enum ena_intr_moder_level level,
2867 struct ena_intr_moder_entry *entry)
2868 {
2869 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2870
2871 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2872 return;
2873
2874 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2875 if (ena_dev->intr_delay_resolution)
2876 intr_moder_tbl[level].intr_moder_interval /=
2877 ena_dev->intr_delay_resolution;
2878 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2879
2880 /* use hardcoded value until ethtool supports bytecount parameter */
2881 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2882 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2883 }
2884
2885 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2886 enum ena_intr_moder_level level,
2887 struct ena_intr_moder_entry *entry)
2888 {
2889 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2890
2891 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2892 return;
2893
2894 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2895 if (ena_dev->intr_delay_resolution)
2896 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2897 entry->pkts_per_interval =
2898 intr_moder_tbl[level].pkts_per_interval;
2899 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2900 }
2901
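/* ena_com_config_dev_mode:
 * choose between host memory and device memory (LLQ) TX placement based on
 * the device LLQ capabilities, and validate that a maximal header plus the
 * descriptors placed before it fit in a single LLQ entry.
 */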
2902 int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2903 struct ena_admin_feature_llq_desc *llq_features,
2904 struct ena_llq_configurations *llq_default_cfg)
2905 {
2906 int rc;
2907 int size;
2908
2909 if (!llq_features->max_llq_num) {
2910 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2911 return 0;
2912 }
2913
2914 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2915 if (rc)
2916 return rc;
2917
2918 /* Validate the descriptor is not too big */
2919 size = ena_dev->tx_max_header_size;
2920 size += ena_dev->llq_info.descs_num_before_header *
2921 sizeof(struct ena_eth_io_tx_desc);
2922
2923 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
2924 pr_err("the size of the LLQ entry is smaller than needed\n");
2925 return -EINVAL;
2926 }
2927
2928 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2929
2930 return 0;
2931 }