1/*
2 * Copyright 2015 Amazon.com, Inc. or its affiliates.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include "ena_com.h"
34
35/*****************************************************************************/
36/*****************************************************************************/
37
38/* Timeout in micro-sec */
39#define ADMIN_CMD_TIMEOUT_US (3000000)
40
41#define ENA_ASYNC_QUEUE_DEPTH 16
42#define ENA_ADMIN_QUEUE_DEPTH 32
43
44#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46 | (ENA_COMMON_SPEC_VERSION_MINOR))
47
48#define ENA_CTRL_MAJOR 0
49#define ENA_CTRL_MINOR 0
50#define ENA_CTRL_SUB_MINOR 1
51
52#define MIN_ENA_CTRL_VER \
53 (((ENA_CTRL_MAJOR) << \
54 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
55 ((ENA_CTRL_MINOR) << \
56 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
57 (ENA_CTRL_SUB_MINOR))
58
59#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
60#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
61
62#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
63
64#define ENA_REGS_ADMIN_INTR_MASK 1
65
66#define ENA_POLL_MS 5
67
68/*****************************************************************************/
69/*****************************************************************************/
70/*****************************************************************************/
71
72enum ena_cmd_status {
73 ENA_CMD_SUBMITTED,
74 ENA_CMD_COMPLETED,
75 /* Abort - canceled by the driver */
76 ENA_CMD_ABORTED,
77};
78
79struct ena_comp_ctx {
80 struct completion wait_event;
81 struct ena_admin_acq_entry *user_cqe;
82 u32 comp_size;
83 enum ena_cmd_status status;
84 /* status from the device */
85 u8 comp_status;
86 u8 cmd_opcode;
87 bool occupied;
88};
89
90struct ena_com_stats_ctx {
91 struct ena_admin_aq_get_stats_cmd get_cmd;
92 struct ena_admin_acq_get_stats_resp get_resp;
93};
94
95static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
96 struct ena_common_mem_addr *ena_addr,
97 dma_addr_t addr)
98{
99 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
100 pr_err("dma address has more bits than the device supports\n");
101 return -EINVAL;
102 }
103
104 ena_addr->mem_addr_low = lower_32_bits(addr);
105 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
106
107 return 0;
108}
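
/* Usage sketch (illustrative only; it mirrors how this file already uses the
 * helper, e.g. in ena_com_create_io_cq): a DMA address is validated against
 * the device's dma_addr_bits and split into the low/high words of an
 * ena_common_mem_addr embedded in an admin command:
 *
 *	ret = ena_com_mem_addr_set(ena_dev,
 *				   &create_cmd.cq_ba,
 *				   io_cq->cdesc_addr.phys_addr);
 *	if (unlikely(ret)) {
 *		pr_err("memory address set failed\n");
 *		return ret;
 *	}
 */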
109
110static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111{
112 struct ena_com_admin_sq *sq = &queue->sq;
113 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
114
115 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
116 GFP_KERNEL);
117
118 if (!sq->entries) {
119 pr_err("memory allocation failed");
120 return -ENOMEM;
121 }
122
123 sq->head = 0;
124 sq->tail = 0;
125 sq->phase = 1;
126
127 sq->db_addr = NULL;
128
129 return 0;
130}
131
132static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133{
134 struct ena_com_admin_cq *cq = &queue->cq;
135 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
136
137 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
138 GFP_KERNEL);
139
140 if (!cq->entries) {
141 pr_err("memory allocation failed");
142 return -ENOMEM;
143 }
144
145 cq->head = 0;
146 cq->phase = 1;
147
148 return 0;
149}
150
151static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
152 struct ena_aenq_handlers *aenq_handlers)
153{
154 struct ena_com_aenq *aenq = &dev->aenq;
155 u32 addr_low, addr_high, aenq_caps;
156 u16 size;
157
158 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
159 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
160 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
161 GFP_KERNEL);
162
163 if (!aenq->entries) {
164 pr_err("memory allocation failed");
165 return -ENOMEM;
166 }
167
168 aenq->head = aenq->q_depth;
169 aenq->phase = 1;
170
171 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
172 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
173
174 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
175 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
176
177 aenq_caps = 0;
178 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
179 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
180 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
181 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
182 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
183
184 if (unlikely(!aenq_handlers)) {
185 pr_err("aenq handlers pointer is NULL\n");
186 return -EINVAL;
187 }
188
189 aenq->aenq_handlers = aenq_handlers;
190
191 return 0;
192}
193
194static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
195 struct ena_comp_ctx *comp_ctx)
196{
197 comp_ctx->occupied = false;
198 atomic_dec(&queue->outstanding_cmds);
199}
200
201static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
202 u16 command_id, bool capture)
203{
204 if (unlikely(command_id >= queue->q_depth)) {
205 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
206 command_id, queue->q_depth);
207 return NULL;
208 }
209
210 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
211 pr_err("Completion context is occupied\n");
212 return NULL;
213 }
214
215 if (capture) {
216 atomic_inc(&queue->outstanding_cmds);
217 queue->comp_ctx[command_id].occupied = true;
218 }
219
220 return &queue->comp_ctx[command_id];
221}
222
223static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
224 struct ena_admin_aq_entry *cmd,
225 size_t cmd_size_in_bytes,
226 struct ena_admin_acq_entry *comp,
227 size_t comp_size_in_bytes)
228{
229 struct ena_comp_ctx *comp_ctx;
230 u16 tail_masked, cmd_id;
231 u16 queue_size_mask;
232 u16 cnt;
233
234 queue_size_mask = admin_queue->q_depth - 1;
235
236 tail_masked = admin_queue->sq.tail & queue_size_mask;
237
238 /* In case of queue FULL */
239 cnt = atomic_read(&admin_queue->outstanding_cmds);
240 if (cnt >= admin_queue->q_depth) {
241 pr_debug("admin queue is full.\n");
242 admin_queue->stats.out_of_space++;
243 return ERR_PTR(-ENOSPC);
244 }
245
246 cmd_id = admin_queue->curr_cmd_id;
247
248 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
249 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
250
251 cmd->aq_common_descriptor.command_id |= cmd_id &
252 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
253
254 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
255 if (unlikely(!comp_ctx))
256 return ERR_PTR(-EINVAL);
257
258 comp_ctx->status = ENA_CMD_SUBMITTED;
259 comp_ctx->comp_size = (u32)comp_size_in_bytes;
260 comp_ctx->user_cqe = comp;
261 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
262
263 reinit_completion(&comp_ctx->wait_event);
264
265 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
266
267 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
268 queue_size_mask;
269
270 admin_queue->sq.tail++;
271 admin_queue->stats.submitted_cmd++;
272
273 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
274 admin_queue->sq.phase = !admin_queue->sq.phase;
275
276 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
277
278 return comp_ctx;
279}
280
281static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
282{
283 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
284 struct ena_comp_ctx *comp_ctx;
285 u16 i;
286
287 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
288 if (unlikely(!queue->comp_ctx)) {
289 pr_err("memory allocation failed");
290 return -ENOMEM;
291 }
292
293 for (i = 0; i < queue->q_depth; i++) {
294 comp_ctx = get_comp_ctxt(queue, i, false);
295 if (comp_ctx)
296 init_completion(&comp_ctx->wait_event);
297 }
298
299 return 0;
300}
301
302static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
303 struct ena_admin_aq_entry *cmd,
304 size_t cmd_size_in_bytes,
305 struct ena_admin_acq_entry *comp,
306 size_t comp_size_in_bytes)
307{
308 unsigned long flags;
309 struct ena_comp_ctx *comp_ctx;
310
311 spin_lock_irqsave(&admin_queue->q_lock, flags);
312 if (unlikely(!admin_queue->running_state)) {
313 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
314 return ERR_PTR(-ENODEV);
315 }
316 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
317 cmd_size_in_bytes,
318 comp,
319 comp_size_in_bytes);
320 if (IS_ERR(comp_ctx))
321 admin_queue->running_state = false;
322 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
323
324 return comp_ctx;
325}
326
327static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
328 struct ena_com_create_io_ctx *ctx,
329 struct ena_com_io_sq *io_sq)
330{
331 size_t size;
332 int dev_node = 0;
333
334 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
335
336 io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
337 io_sq->desc_entry_size =
338 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
339 sizeof(struct ena_eth_io_tx_desc) :
340 sizeof(struct ena_eth_io_rx_desc);
341
342 size = io_sq->desc_entry_size * io_sq->q_depth;
343
344 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
345 dev_node = dev_to_node(ena_dev->dmadev);
346 set_dev_node(ena_dev->dmadev, ctx->numa_node);
347 io_sq->desc_addr.virt_addr =
348 dma_zalloc_coherent(ena_dev->dmadev, size,
349 &io_sq->desc_addr.phys_addr,
350 GFP_KERNEL);
351 set_dev_node(ena_dev->dmadev, dev_node);
352 if (!io_sq->desc_addr.virt_addr) {
353 io_sq->desc_addr.virt_addr =
354 dma_zalloc_coherent(ena_dev->dmadev, size,
355 &io_sq->desc_addr.phys_addr,
356 GFP_KERNEL);
357 }
358 } else {
359 dev_node = dev_to_node(ena_dev->dmadev);
360 set_dev_node(ena_dev->dmadev, ctx->numa_node);
361 io_sq->desc_addr.virt_addr =
362 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
363 set_dev_node(ena_dev->dmadev, dev_node);
364 if (!io_sq->desc_addr.virt_addr) {
365 io_sq->desc_addr.virt_addr =
366 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
367 }
368 }
369
370 if (!io_sq->desc_addr.virt_addr) {
371 pr_err("memory allocation failed");
372 return -ENOMEM;
373 }
374
375 io_sq->tail = 0;
376 io_sq->next_to_comp = 0;
377 io_sq->phase = 1;
378
379 return 0;
380}
381
382static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
383 struct ena_com_create_io_ctx *ctx,
384 struct ena_com_io_cq *io_cq)
385{
386 size_t size;
387 int prev_node = 0;
388
389 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
390
391 /* Use the basic completion descriptor for Rx */
392 io_cq->cdesc_entry_size_in_bytes =
393 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
394 sizeof(struct ena_eth_io_tx_cdesc) :
395 sizeof(struct ena_eth_io_rx_cdesc_base);
396
397 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
398
399 prev_node = dev_to_node(ena_dev->dmadev);
400 set_dev_node(ena_dev->dmadev, ctx->numa_node);
401 io_cq->cdesc_addr.virt_addr =
402 dma_zalloc_coherent(ena_dev->dmadev, size,
403 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
404 set_dev_node(ena_dev->dmadev, prev_node);
405 if (!io_cq->cdesc_addr.virt_addr) {
406 io_cq->cdesc_addr.virt_addr =
407 dma_zalloc_coherent(ena_dev->dmadev, size,
408 &io_cq->cdesc_addr.phys_addr,
409 GFP_KERNEL);
410 }
411
412 if (!io_cq->cdesc_addr.virt_addr) {
413 pr_err("memory allocation failed");
414 return -ENOMEM;
415 }
416
417 io_cq->phase = 1;
418 io_cq->head = 0;
419
420 return 0;
421}
422
423static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
424 struct ena_admin_acq_entry *cqe)
425{
426 struct ena_comp_ctx *comp_ctx;
427 u16 cmd_id;
428
429 cmd_id = cqe->acq_common_descriptor.command &
430 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
431
432 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
433 if (unlikely(!comp_ctx)) {
434 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
435 admin_queue->running_state = false;
436 return;
437 }
438
439 comp_ctx->status = ENA_CMD_COMPLETED;
440 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
441
442 if (comp_ctx->user_cqe)
443 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
444
445 if (!admin_queue->polling)
446 complete(&comp_ctx->wait_event);
447}
448
449static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
450{
451 struct ena_admin_acq_entry *cqe = NULL;
452 u16 comp_num = 0;
453 u16 head_masked;
454 u8 phase;
455
456 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
457 phase = admin_queue->cq.phase;
458
459 cqe = &admin_queue->cq.entries[head_masked];
460
461 /* Go over all the completions */
462 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
463 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
464 /* Do not read the rest of the completion entry before the
465 * phase bit was validated
466 */
467 rmb();
468 ena_com_handle_single_admin_completion(admin_queue, cqe);
469
470 head_masked++;
471 comp_num++;
472 if (unlikely(head_masked == admin_queue->q_depth)) {
473 head_masked = 0;
474 phase = !phase;
475 }
476
477 cqe = &admin_queue->cq.entries[head_masked];
478 }
479
480 admin_queue->cq.head += comp_num;
481 admin_queue->cq.phase = phase;
482 admin_queue->sq.head += comp_num;
483 admin_queue->stats.completed_cmd += comp_num;
484}
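
/* Worked example of the phase scheme above (illustrative, with a small
 * q_depth of 4 for clarity): the producer writes its first lap of entries
 * with phase 1,1,1,1, wraps, and writes the next lap with phase 0,0,0,0.
 * The consumer starts expecting phase 1 and stops at the first entry whose
 * phase bit differs from that expectation; when head_masked wraps back to 0
 * it flips the expected phase too, so entries left over from the previous
 * lap (still carrying the old phase) are never treated as new completions.
 */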
485
486static int ena_com_comp_status_to_errno(u8 comp_status)
487{
488 if (unlikely(comp_status != 0))
489 pr_err("admin command failed[%u]\n", comp_status);
490
491 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
492 return -EINVAL;
493
494 switch (comp_status) {
495 case ENA_ADMIN_SUCCESS:
496 return 0;
497 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
498 return -ENOMEM;
499 case ENA_ADMIN_UNSUPPORTED_OPCODE:
500 return -EOPNOTSUPP;
501 case ENA_ADMIN_BAD_OPCODE:
502 case ENA_ADMIN_MALFORMED_REQUEST:
503 case ENA_ADMIN_ILLEGAL_PARAMETER:
504 case ENA_ADMIN_UNKNOWN_ERROR:
505 return -EINVAL;
506 }
507
508 return 0;
509}
510
511static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
512 struct ena_com_admin_queue *admin_queue)
513{
514 unsigned long flags, timeout;
515 int ret;
516
517 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
518
519 while (1) {
520 spin_lock_irqsave(&admin_queue->q_lock, flags);
521 ena_com_handle_admin_completion(admin_queue);
522 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
523
524 if (comp_ctx->status != ENA_CMD_SUBMITTED)
525 break;
526
527 if (time_is_before_jiffies(timeout)) {
528 pr_err("Wait for completion (polling) timeout\n");
529 /* ENA didn't have any completion */
530 spin_lock_irqsave(&admin_queue->q_lock, flags);
531 admin_queue->stats.no_completion++;
532 admin_queue->running_state = false;
533 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
534
535 ret = -ETIME;
536 goto err;
537 }
538
539 msleep(ENA_POLL_MS);
540 }
541
542 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
543 pr_err("Command was aborted\n");
544 spin_lock_irqsave(&admin_queue->q_lock, flags);
545 admin_queue->stats.aborted_cmd++;
546 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
547 ret = -ENODEV;
548 goto err;
549 }
550
551 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
552 comp_ctx->status);
553
554 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
555err:
556 comp_ctxt_release(admin_queue, comp_ctx);
557 return ret;
558}
559
560static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
561 struct ena_com_admin_queue *admin_queue)
562{
563 unsigned long flags;
564 int ret;
565
566 wait_for_completion_timeout(&comp_ctx->wait_event,
567 usecs_to_jiffies(
568 admin_queue->completion_timeout));
569
570 /* In case the command wasn't completed, find out the root cause.
571 * There might be 2 kinds of errors:
572 * 1) No completion (timeout reached)
573 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
574 */
575 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
576 spin_lock_irqsave(&admin_queue->q_lock, flags);
577 ena_com_handle_admin_completion(admin_queue);
578 admin_queue->stats.no_completion++;
579 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
580
581 if (comp_ctx->status == ENA_CMD_COMPLETED)
582 pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
583 comp_ctx->cmd_opcode);
584 else
585 pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
586 comp_ctx->cmd_opcode, comp_ctx->status);
587
588 admin_queue->running_state = false;
589 ret = -ETIME;
590 goto err;
591 }
592
593 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
594err:
595 comp_ctxt_release(admin_queue, comp_ctx);
596 return ret;
597}
598
599/* This method reads a hardware device register by posting a write
600 * and waiting for the response.
601 * On timeout the function will return ENA_MMIO_READ_TIMEOUT.
602 */
603static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
604{
605 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
606 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
607 mmio_read->read_resp;
608 u32 mmio_read_reg, ret, i;
609 unsigned long flags;
610 u32 timeout = mmio_read->reg_read_to;
611
612 might_sleep();
613
614 if (timeout == 0)
615 timeout = ENA_REG_READ_TIMEOUT;
616
617 /* If readless is disabled, perform regular read */
618 if (!mmio_read->readless_supported)
619 return readl(ena_dev->reg_bar + offset);
620
621 spin_lock_irqsave(&mmio_read->lock, flags);
622 mmio_read->seq_num++;
623
624 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
625 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
626 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
627 mmio_read_reg |= mmio_read->seq_num &
628 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
629
630 /* make sure read_resp->req_id gets updated before the hw can write
631 * to it
632 */
633 wmb();
634
635 writel_relaxed(mmio_read_reg,
636 ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
637
638 mmiowb();
639 for (i = 0; i < timeout; i++) {
640 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
641 break;
642
643 udelay(1);
644 }
645
646 if (unlikely(i == timeout)) {
647 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
648 mmio_read->seq_num, offset, read_resp->req_id,
649 read_resp->reg_off);
650 ret = ENA_MMIO_READ_TIMEOUT;
651 goto err;
652 }
653
654 if (read_resp->reg_off != offset) {
655 pr_err("Read failure: wrong offset provided");
656 ret = ENA_MMIO_READ_TIMEOUT;
657 } else {
658 ret = read_resp->reg_val;
659 }
660err:
661 spin_unlock_irqrestore(&mmio_read->lock, flags);
662
663 return ret;
664}
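
/* Usage sketch (illustrative; this is how callers in this file, e.g.
 * ena_com_get_dma_width, consume the helper): a timed-out readless read is
 * reported through the ENA_MMIO_READ_TIMEOUT sentinel rather than an errno,
 * so every caller must check for it explicitly:
 *
 *	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 *
 *	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
 *		pr_err("Reg read timeout occurred\n");
 *		return -ETIME;
 *	}
 */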
665
666/* There are two ways to wait for a completion.
667 * Polling mode - wait until the completion is available.
668 * Async mode - wait on a wait queue until the completion is ready
669 * (or the timeout expires).
670 * In async mode the IRQ handler is expected to call
671 * ena_com_handle_admin_completion to mark the completions.
672 */
673static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
674 struct ena_com_admin_queue *admin_queue)
675{
676 if (admin_queue->polling)
677 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
678 admin_queue);
679
680 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
681 admin_queue);
682}
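
/* Illustrative note: which of the two wait paths above is taken is governed
 * by admin_queue->polling, which callers toggle through
 * ena_com_set_admin_polling_mode() (defined later in this file), e.g.
 *
 *	ena_com_set_admin_polling_mode(ena_dev, true);
 *
 * puts the admin queue into polling mode so no MSI-X interrupt is required.
 */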
683
684static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
685 struct ena_com_io_sq *io_sq)
686{
687 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
688 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
689 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
690 u8 direction;
691 int ret;
692
693 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
694
695 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
696 direction = ENA_ADMIN_SQ_DIRECTION_TX;
697 else
698 direction = ENA_ADMIN_SQ_DIRECTION_RX;
699
700 destroy_cmd.sq.sq_identity |= (direction <<
701 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
702 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
703
704 destroy_cmd.sq.sq_idx = io_sq->idx;
705 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
706
707 ret = ena_com_execute_admin_command(admin_queue,
708 (struct ena_admin_aq_entry *)&destroy_cmd,
709 sizeof(destroy_cmd),
710 (struct ena_admin_acq_entry *)&destroy_resp,
711 sizeof(destroy_resp));
712
713 if (unlikely(ret && (ret != -ENODEV)))
714 pr_err("failed to destroy io sq error: %d\n", ret);
715
716 return ret;
717}
718
719static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
720 struct ena_com_io_sq *io_sq,
721 struct ena_com_io_cq *io_cq)
722{
723 size_t size;
724
725 if (io_cq->cdesc_addr.virt_addr) {
726 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
727
728 dma_free_coherent(ena_dev->dmadev, size,
729 io_cq->cdesc_addr.virt_addr,
730 io_cq->cdesc_addr.phys_addr);
731
732 io_cq->cdesc_addr.virt_addr = NULL;
733 }
734
735 if (io_sq->desc_addr.virt_addr) {
736 size = io_sq->desc_entry_size * io_sq->q_depth;
737
738 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
739 dma_free_coherent(ena_dev->dmadev, size,
740 io_sq->desc_addr.virt_addr,
741 io_sq->desc_addr.phys_addr);
742 else
743 devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
744
745 io_sq->desc_addr.virt_addr = NULL;
746 }
747}
748
749static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
750 u16 exp_state)
751{
752 u32 val, i;
753
754 /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
755 timeout = (timeout * 100) / ENA_POLL_MS;
756
757 for (i = 0; i < timeout; i++) {
758 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
759
760 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
761 pr_err("Reg read timeout occurred\n");
762 return -ETIME;
763 }
764
765 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
766 exp_state)
767 return 0;
768
769 msleep(ENA_POLL_MS);
770 }
771
772 return -ETIME;
773}
774
775static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
776 enum ena_admin_aq_feature_id feature_id)
777{
778 u32 feature_mask = 1 << feature_id;
779
780 /* Device attributes are always supported */
781 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
782 !(ena_dev->supported_features & feature_mask))
783 return false;
784
785 return true;
786}
787
788static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
789 struct ena_admin_get_feat_resp *get_resp,
790 enum ena_admin_aq_feature_id feature_id,
791 dma_addr_t control_buf_dma_addr,
792 u32 control_buff_size)
793{
794 struct ena_com_admin_queue *admin_queue;
795 struct ena_admin_get_feat_cmd get_cmd;
796 int ret;
797
798 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
799 pr_debug("Feature %d isn't supported\n", feature_id);
800 return -EOPNOTSUPP;
801 }
802
803 memset(&get_cmd, 0x0, sizeof(get_cmd));
804 admin_queue = &ena_dev->admin_queue;
805
806 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
807
808 if (control_buff_size)
809 get_cmd.aq_common_descriptor.flags =
810 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
811 else
812 get_cmd.aq_common_descriptor.flags = 0;
813
814 ret = ena_com_mem_addr_set(ena_dev,
815 &get_cmd.control_buffer.address,
816 control_buf_dma_addr);
817 if (unlikely(ret)) {
818 pr_err("memory address set failed\n");
819 return ret;
820 }
821
822 get_cmd.control_buffer.length = control_buff_size;
823
824 get_cmd.feat_common.feature_id = feature_id;
825
826 ret = ena_com_execute_admin_command(admin_queue,
827 (struct ena_admin_aq_entry *)
828 &get_cmd,
829 sizeof(get_cmd),
830 (struct ena_admin_acq_entry *)
831 get_resp,
832 sizeof(*get_resp));
833
834 if (unlikely(ret))
835 pr_err("Failed to submit get_feature command %d error: %d\n",
836 feature_id, ret);
837
838 return ret;
839}
840
841static int ena_com_get_feature(struct ena_com_dev *ena_dev,
842 struct ena_admin_get_feat_resp *get_resp,
843 enum ena_admin_aq_feature_id feature_id)
844{
845 return ena_com_get_feature_ex(ena_dev,
846 get_resp,
847 feature_id,
848 0,
849 0);
850}
851
852static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
853{
854 struct ena_rss *rss = &ena_dev->rss;
855
856 rss->hash_key =
857 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
858 &rss->hash_key_dma_addr, GFP_KERNEL);
859
860 if (unlikely(!rss->hash_key))
861 return -ENOMEM;
862
863 return 0;
864}
865
866static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
867{
868 struct ena_rss *rss = &ena_dev->rss;
869
870 if (rss->hash_key)
871 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
872 rss->hash_key, rss->hash_key_dma_addr);
873 rss->hash_key = NULL;
874}
875
876static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
877{
878 struct ena_rss *rss = &ena_dev->rss;
879
880 rss->hash_ctrl =
881 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
882 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
883
884 if (unlikely(!rss->hash_ctrl))
885 return -ENOMEM;
886
887 return 0;
888}
889
890static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
891{
892 struct ena_rss *rss = &ena_dev->rss;
893
894 if (rss->hash_ctrl)
895 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
896 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
897 rss->hash_ctrl = NULL;
898}
899
900static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
901 u16 log_size)
902{
903 struct ena_rss *rss = &ena_dev->rss;
904 struct ena_admin_get_feat_resp get_resp;
905 size_t tbl_size;
906 int ret;
907
908 ret = ena_com_get_feature(ena_dev, &get_resp,
909 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
910 if (unlikely(ret))
911 return ret;
912
913 if ((get_resp.u.ind_table.min_size > log_size) ||
914 (get_resp.u.ind_table.max_size < log_size)) {
915 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
916 1 << log_size, 1 << get_resp.u.ind_table.min_size,
917 1 << get_resp.u.ind_table.max_size);
918 return -EINVAL;
919 }
920
921 tbl_size = (1ULL << log_size) *
922 sizeof(struct ena_admin_rss_ind_table_entry);
923
924 rss->rss_ind_tbl =
925 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
926 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
927 if (unlikely(!rss->rss_ind_tbl))
928 goto mem_err1;
929
930 tbl_size = (1ULL << log_size) * sizeof(u16);
931 rss->host_rss_ind_tbl =
932 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
933 if (unlikely(!rss->host_rss_ind_tbl))
934 goto mem_err2;
935
936 rss->tbl_log_size = log_size;
937
938 return 0;
939
940mem_err2:
941 tbl_size = (1ULL << log_size) *
942 sizeof(struct ena_admin_rss_ind_table_entry);
943
944 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
945 rss->rss_ind_tbl_dma_addr);
946 rss->rss_ind_tbl = NULL;
947mem_err1:
948 rss->tbl_log_size = 0;
949 return -ENOMEM;
950}
951
952static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
953{
954 struct ena_rss *rss = &ena_dev->rss;
955 size_t tbl_size = (1ULL << rss->tbl_log_size) *
956 sizeof(struct ena_admin_rss_ind_table_entry);
957
958 if (rss->rss_ind_tbl)
959 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
960 rss->rss_ind_tbl_dma_addr);
961 rss->rss_ind_tbl = NULL;
962
963 if (rss->host_rss_ind_tbl)
964 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
965 rss->host_rss_ind_tbl = NULL;
966}
967
968static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
969 struct ena_com_io_sq *io_sq, u16 cq_idx)
970{
971 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
972 struct ena_admin_aq_create_sq_cmd create_cmd;
973 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
974 u8 direction;
975 int ret;
976
977 memset(&create_cmd, 0x0, sizeof(create_cmd));
978
979 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
980
981 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
982 direction = ENA_ADMIN_SQ_DIRECTION_TX;
983 else
984 direction = ENA_ADMIN_SQ_DIRECTION_RX;
985
986 create_cmd.sq_identity |= (direction <<
987 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
988 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
989
990 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
991 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
992
993 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
994 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
995 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
996
997 create_cmd.sq_caps_3 |=
998 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
999
1000 create_cmd.cq_idx = cq_idx;
1001 create_cmd.sq_depth = io_sq->q_depth;
1002
1003 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1004 ret = ena_com_mem_addr_set(ena_dev,
1005 &create_cmd.sq_ba,
1006 io_sq->desc_addr.phys_addr);
1007 if (unlikely(ret)) {
1008 pr_err("memory address set failed\n");
1009 return ret;
1010 }
1011 }
1012
1013 ret = ena_com_execute_admin_command(admin_queue,
1014 (struct ena_admin_aq_entry *)&create_cmd,
1015 sizeof(create_cmd),
1016 (struct ena_admin_acq_entry *)&cmd_completion,
1017 sizeof(cmd_completion));
1018 if (unlikely(ret)) {
1019 pr_err("Failed to create IO SQ. error: %d\n", ret);
1020 return ret;
1021 }
1022
1023 io_sq->idx = cmd_completion.sq_idx;
1024
1025 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1026 (uintptr_t)cmd_completion.sq_doorbell_offset);
1027
1028 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1029 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1030 + cmd_completion.llq_headers_offset);
1031
1032 io_sq->desc_addr.pbuf_dev_addr =
1033 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1034 cmd_completion.llq_descriptors_offset);
1035 }
1036
1037 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1038
1039 return ret;
1040}
1041
1042static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1043{
1044 struct ena_rss *rss = &ena_dev->rss;
1045 struct ena_com_io_sq *io_sq;
1046 u16 qid;
1047 int i;
1048
1049 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1050 qid = rss->host_rss_ind_tbl[i];
1051 if (qid >= ENA_TOTAL_NUM_QUEUES)
1052 return -EINVAL;
1053
1054 io_sq = &ena_dev->io_sq_queues[qid];
1055
1056 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1057 return -EINVAL;
1058
1059 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1060 }
1061
1062 return 0;
1063}
1064
1065static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1066{
1067 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1068 struct ena_rss *rss = &ena_dev->rss;
1069 u8 idx;
1070 u16 i;
1071
1072 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1073 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1074
1075 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1076 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1077 return -EINVAL;
1078 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1079
1080 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1081 return -EINVAL;
1082
1083 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1084 }
1085
1086 return 0;
1087}
1088
1089static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1090{
1091 size_t size;
1092
1093 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1094
1095 ena_dev->intr_moder_tbl =
1096 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1097 if (!ena_dev->intr_moder_tbl)
1098 return -ENOMEM;
1099
1100 ena_com_config_default_interrupt_moderation_table(ena_dev);
1101
1102 return 0;
1103}
1104
1105static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1106 u16 intr_delay_resolution)
1107{
1108 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1109 unsigned int i;
1110
1111 if (!intr_delay_resolution) {
1112 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1113 intr_delay_resolution = 1;
1114 }
1115 ena_dev->intr_delay_resolution = intr_delay_resolution;
1116
1117 /* update Rx */
1118 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1119 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1120
1121 /* update Tx */
1122 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1123}
1124
1125/*****************************************************************************/
1126/******************************* API ******************************/
1127/*****************************************************************************/
1128
1129int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1130 struct ena_admin_aq_entry *cmd,
1131 size_t cmd_size,
1132 struct ena_admin_acq_entry *comp,
1133 size_t comp_size)
1134{
1135 struct ena_comp_ctx *comp_ctx;
1136 int ret;
1137
1138 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1139 comp, comp_size);
1140 if (IS_ERR(comp_ctx)) {
1141 if (comp_ctx == ERR_PTR(-ENODEV))
1142 pr_debug("Failed to submit command [%ld]\n",
1143 PTR_ERR(comp_ctx));
1144 else
1145 pr_err("Failed to submit command [%ld]\n",
1146 PTR_ERR(comp_ctx));
1147
1148 return PTR_ERR(comp_ctx);
1149 }
1150
1151 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1152 if (unlikely(ret)) {
1153 if (admin_queue->running_state)
1154 pr_err("Failed to process command. ret = %d\n", ret);
1155 else
1156 pr_debug("Failed to process command. ret = %d\n", ret);
1157 }
1158 return ret;
1159}
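
/* Usage sketch (illustrative; it follows the same pattern as the feature and
 * stats helpers elsewhere in this file): callers build a specific command on
 * the stack, zero it, fill the common descriptor, and cast the command and
 * response to the generic admin entry types:
 *
 *	struct ena_admin_set_feat_cmd cmd;
 *	struct ena_admin_set_feat_resp resp;
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
 *	(feature-specific fields omitted in this sketch)
 *
 *	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */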
1160
1161int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1162 struct ena_com_io_cq *io_cq)
1163{
1164 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1165 struct ena_admin_aq_create_cq_cmd create_cmd;
1166 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1167 int ret;
1168
1169 memset(&create_cmd, 0x0, sizeof(create_cmd));
1170
1171 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1172
1173 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1174 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1175 create_cmd.cq_caps_1 |=
1176 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1177
1178 create_cmd.msix_vector = io_cq->msix_vector;
1179 create_cmd.cq_depth = io_cq->q_depth;
1180
1181 ret = ena_com_mem_addr_set(ena_dev,
1182 &create_cmd.cq_ba,
1183 io_cq->cdesc_addr.phys_addr);
1184 if (unlikely(ret)) {
1185 pr_err("memory address set failed\n");
1186 return ret;
1187 }
1188
1189 ret = ena_com_execute_admin_command(admin_queue,
1190 (struct ena_admin_aq_entry *)&create_cmd,
1191 sizeof(create_cmd),
1192 (struct ena_admin_acq_entry *)&cmd_completion,
1193 sizeof(cmd_completion));
1194 if (unlikely(ret)) {
1195 pr_err("Failed to create IO CQ. error: %d\n", ret);
1196 return ret;
1197 }
1198
1199 io_cq->idx = cmd_completion.cq_idx;
1200
1201 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1202 cmd_completion.cq_interrupt_unmask_register_offset);
1203
1204 if (cmd_completion.cq_head_db_register_offset)
1205 io_cq->cq_head_db_reg =
1206 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1207 cmd_completion.cq_head_db_register_offset);
1208
1209 if (cmd_completion.numa_node_register_offset)
1210 io_cq->numa_node_cfg_reg =
1211 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1212 cmd_completion.numa_node_register_offset);
1213
1214 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1215
1216 return ret;
1217}
1218
1219int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1220 struct ena_com_io_sq **io_sq,
1221 struct ena_com_io_cq **io_cq)
1222{
1223 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1224 pr_err("Invalid queue number %d but the max is %d\n", qid,
1225 ENA_TOTAL_NUM_QUEUES);
1226 return -EINVAL;
1227 }
1228
1229 *io_sq = &ena_dev->io_sq_queues[qid];
1230 *io_cq = &ena_dev->io_cq_queues[qid];
1231
1232 return 0;
1233}
1234
1235void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1236{
1237 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1238 struct ena_comp_ctx *comp_ctx;
1239 u16 i;
1240
1241 if (!admin_queue->comp_ctx)
1242 return;
1243
1244 for (i = 0; i < admin_queue->q_depth; i++) {
1245 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1246 if (unlikely(!comp_ctx))
1247 break;
1248
1249 comp_ctx->status = ENA_CMD_ABORTED;
1250
1251 complete(&comp_ctx->wait_event);
1252 }
1253}
1254
1255void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1256{
1257 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1258 unsigned long flags;
1259
1260 spin_lock_irqsave(&admin_queue->q_lock, flags);
1261 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1262 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1263 msleep(ENA_POLL_MS);
1264 spin_lock_irqsave(&admin_queue->q_lock, flags);
1265 }
1266 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1267}
1268
1269int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1270 struct ena_com_io_cq *io_cq)
1271{
1272 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1273 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1274 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1275 int ret;
1276
1277 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1278
1279 destroy_cmd.cq_idx = io_cq->idx;
1280 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1281
1282 ret = ena_com_execute_admin_command(admin_queue,
1283 (struct ena_admin_aq_entry *)&destroy_cmd,
1284 sizeof(destroy_cmd),
1285 (struct ena_admin_acq_entry *)&destroy_resp,
1286 sizeof(destroy_resp));
1287
1288 if (unlikely(ret && (ret != -ENODEV)))
1289 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1290
1291 return ret;
1292}
1293
1294bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1295{
1296 return ena_dev->admin_queue.running_state;
1297}
1298
1299void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1300{
1301 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1302 unsigned long flags;
1303
1304 spin_lock_irqsave(&admin_queue->q_lock, flags);
1305 ena_dev->admin_queue.running_state = state;
1306 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1307}
1308
1309void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1310{
1311 u16 depth = ena_dev->aenq.q_depth;
1312
1313 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1314
1315 /* Init head_db to mark that all entries in the queue
1316 * are initially available
1317 */
1318 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1319}
1320
1321int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1322{
1323 struct ena_com_admin_queue *admin_queue;
1324 struct ena_admin_set_feat_cmd cmd;
1325 struct ena_admin_set_feat_resp resp;
1326 struct ena_admin_get_feat_resp get_resp;
1327 int ret;
1328
1329 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1330 if (ret) {
1331 pr_info("Can't get aenq configuration\n");
1332 return ret;
1333 }
1334
1335 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1336 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1337 get_resp.u.aenq.supported_groups, groups_flag);
1338 return -EOPNOTSUPP;
1339 }
1340
1341 memset(&cmd, 0x0, sizeof(cmd));
1342 admin_queue = &ena_dev->admin_queue;
1343
1344 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1345 cmd.aq_common_descriptor.flags = 0;
1346 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1347 cmd.u.aenq.enabled_groups = groups_flag;
1348
1349 ret = ena_com_execute_admin_command(admin_queue,
1350 (struct ena_admin_aq_entry *)&cmd,
1351 sizeof(cmd),
1352 (struct ena_admin_acq_entry *)&resp,
1353 sizeof(resp));
1354
1355 if (unlikely(ret))
1356 pr_err("Failed to config AENQ ret: %d\n", ret);
1357
1358 return ret;
1359}
1360
1361int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1362{
1363 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1364 int width;
1365
1366 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1367 pr_err("Reg read timeout occurred\n");
1368 return -ETIME;
1369 }
1370
1371 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1372 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1373
1374 pr_debug("ENA dma width: %d\n", width);
1375
1376 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1377 pr_err("DMA width illegal value: %d\n", width);
1378 return -EINVAL;
1379 }
1380
1381 ena_dev->dma_addr_bits = width;
1382
1383 return width;
1384}
1385
1386int ena_com_validate_version(struct ena_com_dev *ena_dev)
1387{
1388 u32 ver;
1389 u32 ctrl_ver;
1390 u32 ctrl_ver_masked;
1391
1392 /* Make sure the ENA version and the controller version are at least
1393 * the versions the driver expects
1394 */
1395 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1396 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1397 ENA_REGS_CONTROLLER_VERSION_OFF);
1398
1399 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1400 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1401 pr_err("Reg read timeout occurred\n");
1402 return -ETIME;
1403 }
1404
1405 pr_info("ena device version: %d.%d\n",
1406 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1407 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1408 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1409
1410 if (ver < MIN_ENA_VER) {
1411 pr_err("ENA version is lower than the minimal version the driver supports\n");
1412 return -1;
1413 }
1414
1415 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1416 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1417 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1418 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1419 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1420 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1421 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1422 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1423
1424 ctrl_ver_masked =
1425 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1426 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1427 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1428
1429 /* Validate the ctrl version without the implementation ID */
1430 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1431 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1432 return -1;
1433 }
1434
1435 return 0;
1436}
1437
1438void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1439{
1440 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1441 struct ena_com_admin_cq *cq = &admin_queue->cq;
1442 struct ena_com_admin_sq *sq = &admin_queue->sq;
1443 struct ena_com_aenq *aenq = &ena_dev->aenq;
1444 u16 size;
1445
1446 if (admin_queue->comp_ctx)
1447 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1448 admin_queue->comp_ctx = NULL;
1449 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1450 if (sq->entries)
1451 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1452 sq->dma_addr);
1453 sq->entries = NULL;
1454
1455 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1456 if (cq->entries)
1457 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1458 cq->dma_addr);
1459 cq->entries = NULL;
1460
1461 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1462 if (ena_dev->aenq.entries)
1463 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1464 aenq->dma_addr);
1465 aenq->entries = NULL;
1466}
1467
1468void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1469{
1470 u32 mask_value = 0;
1471
1472 if (polling)
1473 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1474
1475 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1476 ena_dev->admin_queue.polling = polling;
1477}
1478
1479int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1480{
1481 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1482
1483 spin_lock_init(&mmio_read->lock);
1484 mmio_read->read_resp =
1485 dma_zalloc_coherent(ena_dev->dmadev,
1486 sizeof(*mmio_read->read_resp),
1487 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1488 if (unlikely(!mmio_read->read_resp))
1489 return -ENOMEM;
1490
1491 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1492
1493 mmio_read->read_resp->req_id = 0x0;
1494 mmio_read->seq_num = 0x0;
1495 mmio_read->readless_supported = true;
1496
1497 return 0;
1498}
1499
1500void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1501{
1502 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1503
1504 mmio_read->readless_supported = readless_supported;
1505}
1506
1507void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1508{
1509 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1510
1511 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1512 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1513
1514 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1515 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1516
1517 mmio_read->read_resp = NULL;
1518}
1519
1520void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1521{
1522 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1523 u32 addr_low, addr_high;
1524
1525 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1526 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1527
1528 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1529 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1530}
1531
1532int ena_com_admin_init(struct ena_com_dev *ena_dev,
1533 struct ena_aenq_handlers *aenq_handlers,
1534 bool init_spinlock)
1535{
1536 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1537 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1538 int ret;
1539
1540 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1541
1542 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1543 pr_err("Reg read timeout occurred\n");
1544 return -ETIME;
1545 }
1546
1547 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1548 pr_err("Device isn't ready, abort com init\n");
1549 return -ENODEV;
1550 }
1551
1552 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1553
1554 admin_queue->q_dmadev = ena_dev->dmadev;
1555 admin_queue->polling = false;
1556 admin_queue->curr_cmd_id = 0;
1557
1558 atomic_set(&admin_queue->outstanding_cmds, 0);
1559
1560 if (init_spinlock)
1561 spin_lock_init(&admin_queue->q_lock);
1562
1563 ret = ena_com_init_comp_ctxt(admin_queue);
1564 if (ret)
1565 goto error;
1566
1567 ret = ena_com_admin_init_sq(admin_queue);
1568 if (ret)
1569 goto error;
1570
1571 ret = ena_com_admin_init_cq(admin_queue);
1572 if (ret)
1573 goto error;
1574
1575 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1576 ENA_REGS_AQ_DB_OFF);
1577
1578 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1579 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1580
1581 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1582 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1583
1584 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1585 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1586
1587 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1588 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1589
1590 aq_caps = 0;
1591 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1592 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1593 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1594 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1595
1596 acq_caps = 0;
1597 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1598 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1599 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1600 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1601
1602 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1603 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1604 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1605 if (ret)
1606 goto error;
1607
1608 admin_queue->running_state = true;
1609
1610 return 0;
1611error:
1612 ena_com_admin_destroy(ena_dev);
1613
1614 return ret;
1615}
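
/* Illustrative bring-up sketch (an assumption about the caller based on the
 * helpers exposed in this file, not a verbatim copy of the netdev driver):
 * the MMIO read machinery must exist before any register is read, and the
 * version/readiness checks come before ena_com_admin_init(). aenq_handlers
 * here stands for the caller's struct ena_aenq_handlers table:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *	rc = ena_com_validate_version(ena_dev);
 *	if (rc)
 *		goto err_mmio_read;
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	if (rc)
 *		goto err_mmio_read;
 */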
1616
1617int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1618 struct ena_com_create_io_ctx *ctx)
1619{
1620 struct ena_com_io_sq *io_sq;
1621 struct ena_com_io_cq *io_cq;
1622 int ret;
1623
1624 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1625 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1626 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1627 return -EINVAL;
1628 }
1629
1630 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1631 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1632
1633 memset(io_sq, 0x0, sizeof(*io_sq));
1634 memset(io_cq, 0x0, sizeof(*io_cq));
1635
1636 /* Init CQ */
1637 io_cq->q_depth = ctx->queue_size;
1638 io_cq->direction = ctx->direction;
1639 io_cq->qid = ctx->qid;
1640
1641 io_cq->msix_vector = ctx->msix_vector;
1642
1643 io_sq->q_depth = ctx->queue_size;
1644 io_sq->direction = ctx->direction;
1645 io_sq->qid = ctx->qid;
1646
1647 io_sq->mem_queue_type = ctx->mem_queue_type;
1648
1649 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1650 /* header length is limited to 8 bits */
1651 io_sq->tx_max_header_size =
1652 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1653
1654 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1655 if (ret)
1656 goto error;
1657 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1658 if (ret)
1659 goto error;
1660
1661 ret = ena_com_create_io_cq(ena_dev, io_cq);
1662 if (ret)
1663 goto error;
1664
1665 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1666 if (ret)
1667 goto destroy_io_cq;
1668
1669 return 0;
1670
1671destroy_io_cq:
1672 ena_com_destroy_io_cq(ena_dev, io_cq);
1673error:
1674 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1675 return ret;
1676}
1677
1678void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1679{
1680 struct ena_com_io_sq *io_sq;
1681 struct ena_com_io_cq *io_cq;
1682
1683 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1684 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1685 ENA_TOTAL_NUM_QUEUES);
1686 return;
1687 }
1688
1689 io_sq = &ena_dev->io_sq_queues[qid];
1690 io_cq = &ena_dev->io_cq_queues[qid];
1691
1692 ena_com_destroy_io_sq(ena_dev, io_sq);
1693 ena_com_destroy_io_cq(ena_dev, io_cq);
1694
1695 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1696}
1697
1698int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1699 struct ena_admin_get_feat_resp *resp)
1700{
1701 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1702}
1703
1704int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1705 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1706{
1707 struct ena_admin_get_feat_resp get_resp;
1708 int rc;
1709
1710 rc = ena_com_get_feature(ena_dev, &get_resp,
1711 ENA_ADMIN_DEVICE_ATTRIBUTES);
1712 if (rc)
1713 return rc;
1714
1715 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1716 sizeof(get_resp.u.dev_attr));
1717 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1718
1719 rc = ena_com_get_feature(ena_dev, &get_resp,
1720 ENA_ADMIN_MAX_QUEUES_NUM);
1721 if (rc)
1722 return rc;
1723
1724 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1725 sizeof(get_resp.u.max_queue));
1726 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1727
1728 rc = ena_com_get_feature(ena_dev, &get_resp,
1729 ENA_ADMIN_AENQ_CONFIG);
1730 if (rc)
1731 return rc;
1732
1733 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1734 sizeof(get_resp.u.aenq));
1735
1736 rc = ena_com_get_feature(ena_dev, &get_resp,
1737 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1738 if (rc)
1739 return rc;
1740
1741 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1742 sizeof(get_resp.u.offload));
1743
1744 /* The driver hints command isn't mandatory. So in case the
1745 * command isn't supported, set the driver hints to 0
1746 */
1747 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1748
1749 if (!rc)
1750 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1751 sizeof(get_resp.u.hw_hints));
1752 else if (rc == -EOPNOTSUPP)
1753 memset(&get_feat_ctx->hw_hints, 0x0,
1754 sizeof(get_feat_ctx->hw_hints));
1755 else
1756 return rc;
1757
1758 return 0;
1759}
1760
1761void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1762{
1763 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1764}
1765
1766/* ena_handle_specific_aenq_event:
1767 * return the handler that is relevant to the specific event group
1768 */
1769static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1770 u16 group)
1771{
1772 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1773
1774 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1775 return aenq_handlers->handlers[group];
1776
1777 return aenq_handlers->unimplemented_handler;
1778}
1779
1780/* ena_aenq_intr_handler:
1781 * handles the aenq incoming events.
1782 * pop events from the queue and apply the specific handler
1783 */
1784void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1785{
1786 struct ena_admin_aenq_entry *aenq_e;
1787 struct ena_admin_aenq_common_desc *aenq_common;
1788 struct ena_com_aenq *aenq = &dev->aenq;
1789 ena_aenq_handler handler_cb;
1790 u16 masked_head, processed = 0;
1791 u8 phase;
1792
1793 masked_head = aenq->head & (aenq->q_depth - 1);
1794 phase = aenq->phase;
1795 aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1796 aenq_common = &aenq_e->aenq_common_desc;
1797
1798 /* Go over all the events */
1799 while ((READ_ONCE(aenq_common->flags) &
1800 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1801 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1802 aenq_common->group, aenq_common->syndrom,
1803 (u64)aenq_common->timestamp_low +
1804 ((u64)aenq_common->timestamp_high << 32));
1805
1806 /* Handle specific event*/
1807 handler_cb = ena_com_get_specific_aenq_cb(dev,
1808 aenq_common->group);
1809 handler_cb(data, aenq_e); /* call the actual event handler*/
1810
1811 /* Get next event entry */
1812 masked_head++;
1813 processed++;
1814
1815 if (unlikely(masked_head == aenq->q_depth)) {
1816 masked_head = 0;
1817 phase = !phase;
1818 }
1819 aenq_e = &aenq->entries[masked_head];
1820 aenq_common = &aenq_e->aenq_common_desc;
1821 }
1822
1823 aenq->head += processed;
1824 aenq->phase = phase;
1825
1826 /* Don't update aenq doorbell if there weren't any processed events */
1827 if (!processed)
1828 return;
1829
1830 /* write the aenq doorbell after all AENQ descriptors were read */
1831 mb();
1832 writel_relaxed((u32)aenq->head,
1833 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1834 mmiowb();
1835}
1836
1837int ena_com_dev_reset(struct ena_com_dev *ena_dev,
1838 enum ena_regs_reset_reason_types reset_reason)
1738cd3e
NB
1839{
1840 u32 stat, timeout, cap, reset_val;
1841 int rc;
1842
1843 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1844 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1845
1846 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
1847 (cap == ENA_MMIO_READ_TIMEOUT))) {
1848 pr_err("Reg read32 timeout occurred\n");
1849 return -ETIME;
1850 }
1851
1852 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
1853 pr_err("Device isn't ready, can't reset device\n");
1854 return -EINVAL;
1855 }
1856
1857 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
1858 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
1859 if (timeout == 0) {
1860 pr_err("Invalid timeout value\n");
1861 return -EINVAL;
1862 }
1863
1864 /* start reset */
1865 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
e2eed0e3
NB
1866 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
1867 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
1738cd3e
NB
1868 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1869
1870 /* Write again the MMIO read request address */
1871 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1872
1873 rc = wait_for_reset_state(ena_dev, timeout,
1874 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
1875 if (rc != 0) {
1876 pr_err("Reset indication didn't turn on\n");
1877 return rc;
1878 }
1879
1880 /* reset done */
1881 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1882 rc = wait_for_reset_state(ena_dev, timeout, 0);
1883 if (rc != 0) {
1884 pr_err("Reset indication didn't turn off\n");
1885 return rc;
1886 }
1887
82ef30f1
NB
1888 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
1889 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
1890 if (timeout)
1891 /* the resolution of timeout reg is 100ms */
1892 ena_dev->admin_queue.completion_timeout = timeout * 100000;
1893 else
1894 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
1895
1738cd3e
NB
1896 return 0;
1897}
1898
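/* ena_get_dev_stats:
 * Common helper for the statistics getters: builds an ENA_ADMIN_GET_STATS
 * admin command of the requested type and executes it synchronously on the
 * admin queue, leaving the device's answer in ctx->get_resp.
 */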
1899static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1900 struct ena_com_stats_ctx *ctx,
1901 enum ena_admin_get_stats_type type)
1902{
1903 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1904 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1905 struct ena_com_admin_queue *admin_queue;
1906 int ret;
1907
1908 admin_queue = &ena_dev->admin_queue;
1909
1910 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1911 get_cmd->aq_common_descriptor.flags = 0;
1912 get_cmd->type = type;
1913
1914 ret = ena_com_execute_admin_command(admin_queue,
1915 (struct ena_admin_aq_entry *)get_cmd,
1916 sizeof(*get_cmd),
1917 (struct ena_admin_acq_entry *)get_resp,
1918 sizeof(*get_resp));
1919
1920 if (unlikely(ret))
1921 pr_err("Failed to get stats. error: %d\n", ret);
1922
1923 return ret;
1924}
1925
1926int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1927 struct ena_admin_basic_stats *stats)
1928{
1929 struct ena_com_stats_ctx ctx;
1930 int ret;
1931
1932 memset(&ctx, 0x0, sizeof(ctx));
1933 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1934 if (likely(ret == 0))
1935 memcpy(stats, &ctx.get_resp.basic_stats,
1936 sizeof(ctx.get_resp.basic_stats));
1937
1938 return ret;
1939}
1940
1941int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1942{
1943 struct ena_com_admin_queue *admin_queue;
1944 struct ena_admin_set_feat_cmd cmd;
1945 struct ena_admin_set_feat_resp resp;
1946 int ret;
1947
1948 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
5add6e4a 1949 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
d1497638 1950 return -EOPNOTSUPP;
1738cd3e
NB
1951 }
1952
1953 memset(&cmd, 0x0, sizeof(cmd));
1954 admin_queue = &ena_dev->admin_queue;
1955
1956 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1957 cmd.aq_common_descriptor.flags = 0;
1958 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1959 cmd.u.mtu.mtu = mtu;
1960
1961 ret = ena_com_execute_admin_command(admin_queue,
1962 (struct ena_admin_aq_entry *)&cmd,
1963 sizeof(cmd),
1964 (struct ena_admin_acq_entry *)&resp,
1965 sizeof(resp));
1966
1967 if (unlikely(ret))
1968 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
1969
1970 return ret;
1971}
1972
1973int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
1974 struct ena_admin_feature_offload_desc *offload)
1975{
1976 int ret;
1977 struct ena_admin_get_feat_resp resp;
1978
1979 ret = ena_com_get_feature(ena_dev, &resp,
1980 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1981 if (unlikely(ret)) {
1982 pr_err("Failed to get offload capabilities %d\n", ret);
1983 return ret;
1984 }
1985
1986 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
1987
1988 return 0;
1989}
1990
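/* ena_com_set_hash_function:
 * Pushes the currently selected RSS hash function (rss->hash_func) and the
 * host-resident hash key to the device. The key is not inlined in the
 * command; it is referenced indirectly through the control buffer DMA
 * address (rss->hash_key_dma_addr).
 */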
1991int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
1992{
1993 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1994 struct ena_rss *rss = &ena_dev->rss;
1995 struct ena_admin_set_feat_cmd cmd;
1996 struct ena_admin_set_feat_resp resp;
1997 struct ena_admin_get_feat_resp get_resp;
1998 int ret;
1999
2000 if (!ena_com_check_supported_feature_id(ena_dev,
2001 ENA_ADMIN_RSS_HASH_FUNCTION)) {
5add6e4a
NB
2002 pr_debug("Feature %d isn't supported\n",
2003 ENA_ADMIN_RSS_HASH_FUNCTION);
d1497638 2004 return -EOPNOTSUPP;
1738cd3e
NB
2005 }
2006
2007 /* Validate hash function is supported */
2008 ret = ena_com_get_feature(ena_dev, &get_resp,
2009 ENA_ADMIN_RSS_HASH_FUNCTION);
2010 if (unlikely(ret))
2011 return ret;
2012
2013	if (!(get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func))) {
2014 pr_err("Func hash %d isn't supported by device, abort\n",
2015 rss->hash_func);
d1497638 2016 return -EOPNOTSUPP;
1738cd3e
NB
2017 }
2018
2019 memset(&cmd, 0x0, sizeof(cmd));
2020
2021 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2022 cmd.aq_common_descriptor.flags =
2023 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2024 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2025 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2026 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2027
2028 ret = ena_com_mem_addr_set(ena_dev,
2029 &cmd.control_buffer.address,
2030 rss->hash_key_dma_addr);
2031 if (unlikely(ret)) {
2032 pr_err("memory address set failed\n");
2033 return ret;
2034 }
2035
2036 cmd.control_buffer.length = sizeof(*rss->hash_key);
2037
2038 ret = ena_com_execute_admin_command(admin_queue,
2039 (struct ena_admin_aq_entry *)&cmd,
2040 sizeof(cmd),
2041 (struct ena_admin_acq_entry *)&resp,
2042 sizeof(resp));
2043 if (unlikely(ret)) {
2044 pr_err("Failed to set hash function %d. error: %d\n",
2045 rss->hash_func, ret);
2046 return -EINVAL;
2047 }
2048
2049 return 0;
2050}
2051
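/* ena_com_fill_hash_function:
 * Validates the requested hash function against the device capabilities,
 * stores the key (Toeplitz only) and the init value in the host copy and
 * commits them with ena_com_set_hash_function(). On failure the previous
 * device configuration is read back so the host copy stays consistent.
 * key_len must be a multiple of 4 bytes; for Toeplitz this is typically the
 * standard 40-byte RSS key, though any length up to sizeof(hash_key->key)
 * is accepted.
 */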
2052int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2053 enum ena_admin_hash_functions func,
2054 const u8 *key, u16 key_len, u32 init_val)
2055{
2056 struct ena_rss *rss = &ena_dev->rss;
2057 struct ena_admin_get_feat_resp get_resp;
2058 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2059 rss->hash_key;
2060 int rc;
2061
2062	/* Make sure the key size is a multiple of DWORDs */
2063 if (unlikely(key_len & 0x3))
2064 return -EINVAL;
2065
2066 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2067 ENA_ADMIN_RSS_HASH_FUNCTION,
2068 rss->hash_key_dma_addr,
2069 sizeof(*rss->hash_key));
2070 if (unlikely(rc))
2071 return rc;
2072
2073 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2074 pr_err("Flow hash function %d isn't supported\n", func);
d1497638 2075 return -EOPNOTSUPP;
1738cd3e
NB
2076 }
2077
2078 switch (func) {
2079 case ENA_ADMIN_TOEPLITZ:
2080 if (key_len > sizeof(hash_key->key)) {
2081 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2082 key_len, sizeof(hash_key->key));
2083 return -EINVAL;
2084 }
2085
2086 memcpy(hash_key->key, key, key_len);
2087 rss->hash_init_val = init_val;
2088 hash_key->keys_num = key_len >> 2;
2089 break;
2090 case ENA_ADMIN_CRC32:
2091 rss->hash_init_val = init_val;
2092 break;
2093 default:
2094 pr_err("Invalid hash function (%d)\n", func);
2095 return -EINVAL;
2096 }
2097
2098 rc = ena_com_set_hash_function(ena_dev);
2099
2100 /* Restore the old function */
2101 if (unlikely(rc))
2102 ena_com_get_hash_function(ena_dev, NULL, NULL);
2103
2104 return rc;
2105}
2106
2107int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2108 enum ena_admin_hash_functions *func,
2109 u8 *key)
2110{
2111 struct ena_rss *rss = &ena_dev->rss;
2112 struct ena_admin_get_feat_resp get_resp;
2113 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2114 rss->hash_key;
2115 int rc;
2116
2117 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2118 ENA_ADMIN_RSS_HASH_FUNCTION,
2119 rss->hash_key_dma_addr,
2120 sizeof(*rss->hash_key));
2121 if (unlikely(rc))
2122 return rc;
2123
2124 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2125 if (func)
2126 *func = rss->hash_func;
2127
2128 if (key)
2129 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2130
2131 return 0;
2132}
2133
2134int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2135 enum ena_admin_flow_hash_proto proto,
2136 u16 *fields)
2137{
2138 struct ena_rss *rss = &ena_dev->rss;
2139 struct ena_admin_get_feat_resp get_resp;
2140 int rc;
2141
2142 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2143 ENA_ADMIN_RSS_HASH_INPUT,
2144 rss->hash_ctrl_dma_addr,
2145 sizeof(*rss->hash_ctrl));
2146 if (unlikely(rc))
2147 return rc;
2148
2149 if (fields)
2150 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2151
2152 return 0;
2153}
2154
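/* ena_com_set_hash_ctrl:
 * Pushes the host-resident hash control table (which L2/L3/L4 fields feed
 * the hash for each protocol) to the device. Like the hash key, the table
 * is passed indirectly via its DMA address rather than inline.
 */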
2155int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2156{
2157 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2158 struct ena_rss *rss = &ena_dev->rss;
2159 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2160 struct ena_admin_set_feat_cmd cmd;
2161 struct ena_admin_set_feat_resp resp;
2162 int ret;
2163
2164 if (!ena_com_check_supported_feature_id(ena_dev,
2165 ENA_ADMIN_RSS_HASH_INPUT)) {
5add6e4a
NB
2166 pr_debug("Feature %d isn't supported\n",
2167 ENA_ADMIN_RSS_HASH_INPUT);
d1497638 2168 return -EOPNOTSUPP;
1738cd3e
NB
2169 }
2170
2171 memset(&cmd, 0x0, sizeof(cmd));
2172
2173 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2174 cmd.aq_common_descriptor.flags =
2175 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2176 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2177 cmd.u.flow_hash_input.enabled_input_sort =
2178 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2179 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2180
2181 ret = ena_com_mem_addr_set(ena_dev,
2182 &cmd.control_buffer.address,
2183 rss->hash_ctrl_dma_addr);
2184 if (unlikely(ret)) {
2185 pr_err("memory address set failed\n");
2186 return ret;
2187 }
2188 cmd.control_buffer.length = sizeof(*hash_ctrl);
2189
2190 ret = ena_com_execute_admin_command(admin_queue,
2191 (struct ena_admin_aq_entry *)&cmd,
2192 sizeof(cmd),
2193 (struct ena_admin_acq_entry *)&resp,
2194 sizeof(resp));
2195 if (unlikely(ret))
2196 pr_err("Failed to set hash input. error: %d\n", ret);
2197
2198 return ret;
2199}
2200
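/* ena_com_set_default_hash_ctrl:
 * Programs a conventional default field selection: 4-tuple (L3 addresses
 * plus L4 ports) for TCP/UDP over IPv4/IPv6, L3 source/destination for
 * plain IP and IPv4 fragments, and L2 source/destination for non-IP
 * traffic. The selection is cross-checked against the fields the device
 * reports as supported before it is committed.
 */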
2201int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2202{
2203 struct ena_rss *rss = &ena_dev->rss;
2204 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2205 rss->hash_ctrl;
2206 u16 available_fields = 0;
2207 int rc, i;
2208
2209 /* Get the supported hash input */
2210 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2211 if (unlikely(rc))
2212 return rc;
2213
2214 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2215 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2216 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2217
2218 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2219 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2220 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2221
2222 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2223 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2224 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2225
2226 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2227 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2228 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2229
2230 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2231 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2232
2233 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2234 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2235
2236 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2237 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2238
422e21e7 2239 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
1738cd3e
NB
2240 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2241
2242 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2243 available_fields = hash_ctrl->selected_fields[i].fields &
2244 hash_ctrl->supported_fields[i].fields;
2245 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2246			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
2247 i, hash_ctrl->supported_fields[i].fields,
2248 hash_ctrl->selected_fields[i].fields);
d1497638 2249 return -EOPNOTSUPP;
1738cd3e
NB
2250 }
2251 }
2252
2253 rc = ena_com_set_hash_ctrl(ena_dev);
2254
2255 /* In case of failure, restore the old hash ctrl */
2256 if (unlikely(rc))
2257 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2258
2259 return rc;
2260}
2261
2262int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2263 enum ena_admin_flow_hash_proto proto,
2264 u16 hash_fields)
2265{
2266 struct ena_rss *rss = &ena_dev->rss;
2267 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2268 u16 supported_fields;
2269 int rc;
2270
2271 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2272 pr_err("Invalid proto num (%u)\n", proto);
2273 return -EINVAL;
2274 }
2275
2276 /* Get the ctrl table */
2277 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2278 if (unlikely(rc))
2279 return rc;
2280
2281 /* Make sure all the fields are supported */
2282 supported_fields = hash_ctrl->supported_fields[proto].fields;
2283 if ((hash_fields & supported_fields) != hash_fields) {
2284		pr_err("proto %d doesn't support the required fields %x. Supports only: %x\n",
2285 proto, hash_fields, supported_fields);
2286 }
2287
2288 hash_ctrl->selected_fields[proto].fields = hash_fields;
2289
2290 rc = ena_com_set_hash_ctrl(ena_dev);
2291
2292 /* In case of failure, restore the old hash ctrl */
2293 if (unlikely(rc))
2294 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2295
2296	return rc;
2297}
2298
2299int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2300 u16 entry_idx, u16 entry_value)
2301{
2302 struct ena_rss *rss = &ena_dev->rss;
2303
2304 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2305 return -EINVAL;
2306
2307	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2308 return -EINVAL;
2309
2310 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2311
2312 return 0;
2313}
2314
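/* ena_com_indirect_table_set:
 * Commits the host indirection table to the device in a single admin
 * command. A typical caller (sketch, not part of this file) first fills
 * every entry and then pushes the whole table, e.g.:
 *
 *	for (i = 0; i < table_size; i++)
 *		ena_com_indirect_table_fill_entry(ena_dev, i,
 *						  i % num_rx_queues);
 *	rc = ena_com_indirect_table_set(ena_dev);
 *
 * where table_size is 1 << indr_tbl_log_size and num_rx_queues stands for
 * whatever queue-id mapping the caller uses.
 */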
2315int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2316{
2317 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2318 struct ena_rss *rss = &ena_dev->rss;
2319 struct ena_admin_set_feat_cmd cmd;
2320 struct ena_admin_set_feat_resp resp;
2321 int ret;
2322
2323 if (!ena_com_check_supported_feature_id(
2324 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
5add6e4a
NB
2325 pr_debug("Feature %d isn't supported\n",
2326 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
d1497638 2327 return -EOPNOTSUPP;
1738cd3e
NB
2328 }
2329
2330 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2331 if (ret) {
2332 pr_err("Failed to convert host indirection table to device table\n");
2333 return ret;
2334 }
2335
2336 memset(&cmd, 0x0, sizeof(cmd));
2337
2338 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2339 cmd.aq_common_descriptor.flags =
2340 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2341 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2342 cmd.u.ind_table.size = rss->tbl_log_size;
2343 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2344
2345 ret = ena_com_mem_addr_set(ena_dev,
2346 &cmd.control_buffer.address,
2347 rss->rss_ind_tbl_dma_addr);
2348 if (unlikely(ret)) {
2349 pr_err("memory address set failed\n");
2350 return ret;
2351 }
2352
2353 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2354 sizeof(struct ena_admin_rss_ind_table_entry);
2355
2356 ret = ena_com_execute_admin_command(admin_queue,
2357 (struct ena_admin_aq_entry *)&cmd,
2358 sizeof(cmd),
2359 (struct ena_admin_acq_entry *)&resp,
2360 sizeof(resp));
2361
2362 if (unlikely(ret))
2363 pr_err("Failed to set indirect table. error: %d\n", ret);
2364
2365 return ret;
2366}
2367
2368int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2369{
2370 struct ena_rss *rss = &ena_dev->rss;
2371 struct ena_admin_get_feat_resp get_resp;
2372 u32 tbl_size;
2373 int i, rc;
2374
2375 tbl_size = (1ULL << rss->tbl_log_size) *
2376 sizeof(struct ena_admin_rss_ind_table_entry);
2377
2378 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2379 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2380 rss->rss_ind_tbl_dma_addr,
2381 tbl_size);
2382 if (unlikely(rc))
2383 return rc;
2384
2385 if (!ind_tbl)
2386 return 0;
2387
2388 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2389 if (unlikely(rc))
2390 return rc;
2391
2392 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2393 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2394
2395 return 0;
2396}
2397
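/* ena_com_rss_init:
 * Allocates the three RSS resources (indirection table, hash key, hash
 * control table) in order and unwinds the earlier allocations if a later
 * one fails; ena_com_rss_destroy() releases them again.
 */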
2398int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2399{
2400 int rc;
2401
2402 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2403
2404 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2405 if (unlikely(rc))
2406 goto err_indr_tbl;
2407
2408 rc = ena_com_hash_key_allocate(ena_dev);
2409 if (unlikely(rc))
2410 goto err_hash_key;
2411
2412 rc = ena_com_hash_ctrl_init(ena_dev);
2413 if (unlikely(rc))
2414 goto err_hash_ctrl;
2415
2416 return 0;
2417
2418err_hash_ctrl:
2419 ena_com_hash_key_destroy(ena_dev);
2420err_hash_key:
2421 ena_com_indirect_table_destroy(ena_dev);
2422err_indr_tbl:
2423
2424 return rc;
2425}
2426
2427void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2428{
2429 ena_com_indirect_table_destroy(ena_dev);
2430 ena_com_hash_key_destroy(ena_dev);
2431 ena_com_hash_ctrl_destroy(ena_dev);
2432
2433 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2434}
2435
2436int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2437{
2438 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2439
2440 host_attr->host_info =
2441 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2442 &host_attr->host_info_dma_addr, GFP_KERNEL);
2443 if (unlikely(!host_attr->host_info))
2444 return -ENOMEM;
2445
2446 return 0;
2447}
2448
2449int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2450 u32 debug_area_size)
2451{
2452 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2453
2454 host_attr->debug_area_virt_addr =
2455 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2456 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2457 if (unlikely(!host_attr->debug_area_virt_addr)) {
2458 host_attr->debug_area_size = 0;
2459 return -ENOMEM;
2460 }
2461
2462 host_attr->debug_area_size = debug_area_size;
2463
2464 return 0;
2465}
2466
2467void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2468{
2469 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2470
2471 if (host_attr->host_info) {
2472 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2473 host_attr->host_info_dma_addr);
2474 host_attr->host_info = NULL;
2475 }
2476}
2477
2478void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2479{
2480 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2481
2482 if (host_attr->debug_area_virt_addr) {
2483 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2484 host_attr->debug_area_virt_addr,
2485 host_attr->debug_area_dma_addr);
2486 host_attr->debug_area_virt_addr = NULL;
2487 }
2488}
2489
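/* ena_com_set_host_attributes:
 * Hands the DMA addresses of the host info page and the debug area to the
 * device via a SET_FEATURE(ENA_ADMIN_HOST_ATTR_CONFIG) admin command.
 */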
2490int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2491{
2492 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2493 struct ena_com_admin_queue *admin_queue;
2494 struct ena_admin_set_feat_cmd cmd;
2495 struct ena_admin_set_feat_resp resp;
2496
2497 int ret;
2498
dd8427a7
NB
2499 /* Host attribute config is called before ena_com_get_dev_attr_feat
2500 * so ena_com can't check if the feature is supported.
2501 */
1738cd3e
NB
2502
2503 memset(&cmd, 0x0, sizeof(cmd));
2504 admin_queue = &ena_dev->admin_queue;
2505
2506 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2507 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2508
2509 ret = ena_com_mem_addr_set(ena_dev,
2510 &cmd.u.host_attr.debug_ba,
2511 host_attr->debug_area_dma_addr);
2512 if (unlikely(ret)) {
2513 pr_err("memory address set failed\n");
2514 return ret;
2515 }
2516
2517 ret = ena_com_mem_addr_set(ena_dev,
2518 &cmd.u.host_attr.os_info_ba,
2519 host_attr->host_info_dma_addr);
2520 if (unlikely(ret)) {
2521 pr_err("memory address set failed\n");
2522 return ret;
2523 }
2524
2525 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2526
2527 ret = ena_com_execute_admin_command(admin_queue,
2528 (struct ena_admin_aq_entry *)&cmd,
2529 sizeof(cmd),
2530 (struct ena_admin_acq_entry *)&resp,
2531 sizeof(resp));
2532
2533 if (unlikely(ret))
2534 pr_err("Failed to set host attributes: %d\n", ret);
2535
2536 return ret;
2537}
2538
2539/* Interrupt moderation */
2540bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2541{
2542 return ena_com_check_supported_feature_id(ena_dev,
2543 ENA_ADMIN_INTERRUPT_MODERATION);
2544}
2545
2546int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2547 u32 tx_coalesce_usecs)
2548{
2549 if (!ena_dev->intr_delay_resolution) {
2550 pr_err("Illegal interrupt delay granularity value\n");
2551 return -EFAULT;
2552 }
2553
2554 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2555 ena_dev->intr_delay_resolution;
2556
2557 return 0;
2558}
2559
2560int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2561 u32 rx_coalesce_usecs)
2562{
2563 if (!ena_dev->intr_delay_resolution) {
2564 pr_err("Illegal interrupt delay granularity value\n");
2565 return -EFAULT;
2566 }
2567
2568	/* We use the LOWEST entry of the moderation table to store the
2569	 * nonadaptive interrupt coalescing values
2570	 */
2571 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2572 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2573
2574 return 0;
2575}
2576
2577void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2578{
2579 if (ena_dev->intr_moder_tbl)
2580 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2581 ena_dev->intr_moder_tbl = NULL;
2582}
2583
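/* ena_com_init_interrupt_moderation:
 * Queries the interrupt moderation feature; -EOPNOTSUPP is tolerated (the
 * device simply has adaptive moderation disabled and 0 is returned). When
 * the feature is supported, the moderation table is allocated, the device's
 * interrupt delay resolution is cached and adaptive moderation is enabled.
 */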
2584int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2585{
2586 struct ena_admin_get_feat_resp get_resp;
2587 u16 delay_resolution;
2588 int rc;
2589
2590 rc = ena_com_get_feature(ena_dev, &get_resp,
2591 ENA_ADMIN_INTERRUPT_MODERATION);
2592
2593 if (rc) {
d1497638 2594 if (rc == -EOPNOTSUPP) {
5add6e4a
NB
2595 pr_debug("Feature %d isn't supported\n",
2596 ENA_ADMIN_INTERRUPT_MODERATION);
1738cd3e
NB
2597 rc = 0;
2598 } else {
2599 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2600 rc);
2601 }
2602
2603 /* no moderation supported, disable adaptive support */
2604 ena_com_disable_adaptive_moderation(ena_dev);
2605 return rc;
2606 }
2607
2608 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2609 if (rc)
2610 goto err;
2611
2612	/* if moderation is supported by the device, enable adaptive moderation */
2613 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2614 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2615 ena_com_enable_adaptive_moderation(ena_dev);
2616
2617 return 0;
2618err:
2619 ena_com_destroy_interrupt_moderation(ena_dev);
2620 return rc;
2621}
2622
2623void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2624{
2625 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2626
2627 if (!intr_moder_tbl)
2628 return;
2629
2630 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2631 ENA_INTR_LOWEST_USECS;
2632 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2633 ENA_INTR_LOWEST_PKTS;
2634 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2635 ENA_INTR_LOWEST_BYTES;
2636
2637 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2638 ENA_INTR_LOW_USECS;
2639 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2640 ENA_INTR_LOW_PKTS;
2641 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2642 ENA_INTR_LOW_BYTES;
2643
2644 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2645 ENA_INTR_MID_USECS;
2646 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2647 ENA_INTR_MID_PKTS;
2648 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2649 ENA_INTR_MID_BYTES;
2650
2651 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2652 ENA_INTR_HIGH_USECS;
2653 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2654 ENA_INTR_HIGH_PKTS;
2655 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2656 ENA_INTR_HIGH_BYTES;
2657
2658 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2659 ENA_INTR_HIGHEST_USECS;
2660 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2661 ENA_INTR_HIGHEST_PKTS;
2662 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2663 ENA_INTR_HIGHEST_BYTES;
2664}
2665
2666unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2667{
2668 return ena_dev->intr_moder_tx_interval;
2669}
2670
2671unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2672{
2673 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2674
2675 if (intr_moder_tbl)
2676 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2677
2678 return 0;
2679}
2680
2681void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2682 enum ena_intr_moder_level level,
2683 struct ena_intr_moder_entry *entry)
2684{
2685 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2686
2687 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2688 return;
2689
2690 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2691 if (ena_dev->intr_delay_resolution)
2692 intr_moder_tbl[level].intr_moder_interval /=
2693 ena_dev->intr_delay_resolution;
2694 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2695
2696 /* use hardcoded value until ethtool supports bytecount parameter */
2697 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2698 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2699}
2700
2701void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2702 enum ena_intr_moder_level level,
2703 struct ena_intr_moder_entry *entry)
2704{
2705 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2706
2707 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2708 return;
2709
2710 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2711 if (ena_dev->intr_delay_resolution)
2712 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2713 entry->pkts_per_interval =
2714 intr_moder_tbl[level].pkts_per_interval;
2715 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2716}