/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah = IAVF_VF_ARQBAH1;
}

/**
 * iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * iavf_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * iavf_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	enum iavf_status ret_code;
	struct iavf_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * iavf_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free the pre-posted receive buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * iavf_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * iavf_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

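/* A hedged aside on the length write above: per the register definitions
 * in iavf_register.h, the low bits of ATQLEN/ARQLEN carry the ring entry
 * count while IAVF_VF_ATQLEN1_ATQENABLE_MASK sets the enable bit, so a
 * single write both sizes and enables the queue. For example, a 64-entry
 * ASQ would be enabled with:
 *
 *	wr32(hw, hw->aq.asq.len, 64 | IAVF_VF_ATQLEN1_ATQENABLE_MASK);
 *
 * Writing 0, as the shutdown paths below do, disables the queue again.
 */
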
/**
 * iavf_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * iavf_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_config_regs;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);
	return ret_code;

init_config_regs:
	iavf_free_asq_bufs(hw);

init_adminq_exit:
	return ret_code;
}

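/* Usage sketch (the values are illustrative assumptions, not requirements):
 * a caller sizes the ASQ before handing it to iavf_init_asq(), e.g.
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	if (iavf_init_asq(hw) != IAVF_SUCCESS)
 *		goto err;	// hypothetical caller-side error path
 *
 * On failure the function unwinds its own allocations before returning,
 * so the caller never has to free partially built state.
 */
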
/**
 * iavf_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * iavf_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 * iavf_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	iavf_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 * iavf_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 *  - hw->aq.num_asq_entries
 *  - hw->aq.num_arq_entries
 *  - hw->aq.arq_buf_size
 *  - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}
	iavf_init_spinlock(&hw->aq.asq_spinlock);
	iavf_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code != IAVF_SUCCESS)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}

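/* A minimal bring-up sketch for the whole AdminQ (entry counts and buffer
 * sizes below are assumptions for illustration; drivers choose their own):
 *
 *	hw->aq.num_asq_entries = 32;
 *	hw->aq.num_arq_entries = 32;
 *	hw->aq.asq_buf_size = 4096;
 *	hw->aq.arq_buf_size = 4096;
 *	if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *		return;		// nothing to unwind; init cleaned up
 *
 * iavf_shutdown_adminq() below is the matching teardown call.
 */
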
/**
 * iavf_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = IAVF_SUCCESS;

	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);
	iavf_destroy_spinlock(&hw->aq.asq_spinlock);
	iavf_destroy_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 * iavf_clean_asq - cleans the Admin Send Queue
 * @hw: pointer to the hardware structure
 *
 * Returns the number of free descriptors
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &(hw->aq.asq);
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
				(IAVF_ADMINQ_CALLBACK)details->callback;
			iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
				    IAVF_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
		iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}

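/* For reference, the free count returned above is standard ring math;
 * IAVF_DESC_UNUSED (defined in iavf_adminq.h) expands to roughly:
 *
 *	((ntc > ntu ? 0 : count) + ntc - ntu - 1)
 *
 * with ntc = next_to_clean and ntu = next_to_use: one slot is always
 * left unused so a full ring can be told apart from an empty one.
 */
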
/**
 * iavf_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * iavf_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16 buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	enum iavf_status status = IAVF_SUCCESS;
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	iavf_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		iavf_memcpy(details,
			    cmd_details,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie. The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
		}
	} else {
		iavf_memset(details, 0,
			    sizeof(struct iavf_asq_cmd_details),
			    IAVF_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		iavf_memcpy(dma_buff->va, buff, buff_size,
			    IAVF_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			iavf_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
			    IAVF_DMA_TO_NONDMA);
		if (buff != NULL)
			iavf_memcpy(buff, dma_buff->va, buff_size,
				    IAVF_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = IAVF_SUCCESS;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		iavf_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	iavf_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
		    IAVF_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}

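/* Putting this helper together with iavf_asq_send_command() above, a
 * hedged sketch of issuing a direct (bufferless) command; the opcode is
 * a placeholder, not a real admin queue opcode:
 *
 *	struct iavf_aq_desc desc;
 *	enum iavf_status status;
 *
 *	iavf_fill_default_direct_cmd_desc(&desc, 0x0000);
 *	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands instead pass a buffer and its length, and the
 * completed descriptor is copied back into 'desc' on writeback.
 */
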
/**
 * iavf_clean_arq_element - clean one element from the receive queue
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	enum iavf_status ret_code = IAVF_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	iavf_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
		    IAVF_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		iavf_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, IAVF_DMA_TO_NONDMA);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

	desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	iavf_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

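/* A hedged polling sketch built on iavf_clean_arq_element(); the buffer
 * and the loop policy are caller-side assumptions, not driver mandates:
 *
 *	struct iavf_arq_event_info event;
 *	u16 pending = 0;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = caller_buf;	// hypothetical caller-owned buffer
 *	do {
 *		if (iavf_clean_arq_element(hw, &event, &pending))
 *			break;		// no work left, or an error
 *		// process event.desc and event.msg_buf here
 *	} while (pending);
 */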