/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ice_common.h"

#define ICE_CQ_INIT_REGS(qinfo, prefix) \
do { \
	(qinfo)->sq.head = prefix##_ATQH; \
	(qinfo)->sq.tail = prefix##_ATQT; \
	(qinfo)->sq.len = prefix##_ATQLEN; \
	(qinfo)->sq.bah = prefix##_ATQBAH; \
	(qinfo)->sq.bal = prefix##_ATQBAL; \
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
	(qinfo)->rq.head = prefix##_ARQH; \
	(qinfo)->rq.tail = prefix##_ARQT; \
	(qinfo)->rq.len = prefix##_ARQLEN; \
	(qinfo)->rq.bah = prefix##_ARQBAH; \
	(qinfo)->rq.bal = prefix##_ARQBAL; \
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;

	cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
				    sizeof(struct ice_sq_cd));
	if (!cq->sq.cmd_buf) {
		ice_free_dma_mem(hw, &cq->sq.desc_buf);
		return ICE_ERR_NO_MEMORY;
	}

	return ICE_SUCCESS;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	return ICE_SUCCESS;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	ice_free_dma_mem(hw, &ring->desc_buf);
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
				     sizeof(cq->rq.desc_buf));
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
		desc->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return ICE_SUCCESS;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
	ice_free(hw, cq->rq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
				     sizeof(cq->sq.desc_buf));
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
	}
	return ICE_SUCCESS;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	ice_free(hw, cq->sq.dma_head);

	return ICE_ERR_NO_MEMORY;
}

static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
	wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return ICE_SUCCESS;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return ICE_SUCCESS;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
	int i; \
	/* free descriptors */ \
	for (i = 0; i < (qi)->num_##ring##_entries; i++) \
		if ((qi)->ring.r.ring##_bi[i].pa) \
			ice_free_dma_mem((hw), \
					 &(qi)->ring.r.ring##_bi[i]); \
	/* free the buffer info list */ \
	if ((qi)->ring.cmd_buf) \
		ice_free(hw, (qi)->ring.cmd_buf); \
	/* free DMA head */ \
	ice_free(hw, (qi)->ring.dma_head); \
} while (0)

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	ice_release_lock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = ICE_SUCCESS;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	ice_release_lock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return ICE_SUCCESS;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return ICE_SUCCESS;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

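/* Illustrative reset-recovery flow (a sketch based on the notes above, not a
 * verbatim caller from this tree): after a reset event a driver can tear the
 * queues down and later re-create them without touching the locks, e.g.
 *
 *	ice_shutdown_all_ctrlq(hw);
 *	(wait for the reset to complete)
 *	if (ice_init_all_ctrlq(hw))
 *		(handle the failure, e.g. retry or take the port down)
 */
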
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_init_lock(&cq->sq_lock);
	ice_init_lock(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 * - cq->num_sq_entries
 * - cq->num_rq_entries
 * - cq->rq_buf_size
 * - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

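/* Usage sketch for the load-time path described above. The sizing values are
 * placeholders chosen for illustration, not values mandated by this file:
 *
 *	hw->adminq.num_sq_entries = 32;
 *	hw->adminq.num_rq_entries = 32;
 *	hw->adminq.sq_buf_size = 4096;
 *	hw->adminq.rq_buf_size = 4096;
 *	(and likewise for hw->mailboxq)
 *
 *	if (ice_create_all_ctrlq(hw))
 *		(bail out -- the firmware control queues are unusable)
 *	...
 *	ice_destroy_all_ctrlq(hw);	(once, at unload)
 */
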
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void
ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	ice_destroy_lock(&cq->sq_lock);
	ice_destroy_lock(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 datalen, flags;

	if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	datalen = LE16_TO_CPU(cq_desc->datalen);
	flags = LE16_TO_CPU(cq_desc->flags);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
		  LE16_TO_CPU(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->cookie_high),
		  LE32_TO_CPU(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.param0),
		  LE32_TO_CPU(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  LE32_TO_CPU(cq_desc->params.generic.addr_high),
		  LE32_TO_CPU(cq_desc->params.generic.addr_low));
	/* Dump buffer iff 1) one exists and 2) is either a response indicated
	 * by the DD and/or CMP flag set or a command with the RD flag set.
	 */
	if (buf && cq_desc->datalen != 0 &&
	    (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
	     flags & ICE_AQ_FLAG_RD)) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
				MIN_T(u16, buf_len, datalen));
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
static enum ice_status
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		       struct ice_aq_desc *desc, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
		   ICE_NONDMA_TO_DMA);

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		ice_memcpy(desc, desc_on_ring, sizeof(*desc),
			   ICE_DMA_TO_NONDMA);
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = LE16_TO_CPU(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				ice_memcpy(buf, dma_buf->va, copy_size,
					   ICE_DMA_TO_NONDMA);
			}
		}
		retval = LE16_TO_CPU(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  LE16_TO_CPU(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		ice_memcpy(details->wb_desc, desc_on_ring,
			   sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	return status;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_SUCCESS;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;

	ice_acquire_lock(&cq->sq_lock);
	status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
	ice_release_lock(&cq->sq_lock);

	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
}

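/* Sending a command ties the two helpers above together. A minimal sketch of
 * a direct (no buffer) command, assuming 'opcode' is a valid admin queue
 * opcode from ice_adminq_cmd.h:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 *
 * For an indirect command, pass a buffer and its size instead of NULL/0; the
 * routine copies the buffer into the ring's DMA memory and copies the
 * firmware's response back into it on completion.
 */
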
/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = ICE_SUCCESS;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	ice_acquire_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  LE16_TO_CPU(desc->opcode),
			  cq->rq_last_status);
	}
	ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = MIN_T(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
			   e->msg_len, ICE_DMA_TO_NONDMA);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf,
		     cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);

	desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16(bi->size);
	desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
	desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	ice_release_lock(&cq->rq_lock);

	return ret_code;
}
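
/* Receive-side usage sketch (illustrative only; the buffer size below is a
 * placeholder): a service task typically drains the ARQ by calling
 * ice_clean_rq_elem() in a loop until it reports no more work:
 *
 *	struct ice_rq_event_info event;
 *	u16 pending = 0;
 *	u8 msg[4096];
 *
 *	event.buf_len = sizeof(msg);
 *	event.msg_buf = msg;
 *	do {
 *		if (ice_clean_rq_elem(hw, &hw->adminq, &event, &pending))
 *			break;
 *		(dispatch on LE16_TO_CPU(event.desc.opcode))
 *	} while (pending);
 */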