/*******************************************************************************

Copyright (c) 2013 - 2015, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */

/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
STATIC u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
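
/* Worked example of the alignment above, assuming
 * I40E_HMC_L2OBJ_BASE_ALIGNMENT == 512 (the 512-byte alignment named in
 * the function header): offset = 700 gives 700 % 512 = 188, so
 * aligned_offset = 700 + (512 - 188) = 1024; an already-aligned offset
 * such as 1024 is returned unchanged.
 */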

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory the function requires, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
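
/* Illustrative sizing only (the real object sizes are hardware/NVM
 * derived, read from the GLHMC_*OBJSZ registers in i40e_init_lan_hmc()
 * below; the sizes used here are assumed): with 1536 Tx queues at 128 B
 * each, 1536 Rx queues at 32 B each and no FCoE objects,
 *	fpm_size = align(1536 * 128) + align(1536 * 32)
 *		 = 196608 + 49152 = 245760 bytes,
 * where align() rounds each partial sum up to a 512 B boundary.
 */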

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto init_lan_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				(l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					(sizeof(struct i40e_hmc_sd_entry) *
					 hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto init_lan_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
}
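
/* The sd_cnt computation above is a ceiling divide: with the direct
 * backing-page size taken as 2 MB (the "full 2M" referred to in
 * i40e_create_lan_hmc_object() below), an l2fpm_size of 5 MB yields
 * sd_cnt = (5 MB + 2 MB - 1) / 2 MB = 3 segment descriptors.
 */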

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in pd table (for paged address mode) invalid
 *	2. Writes to register PMPDINV to invalidate the backing page in FV cache
 *	3. Decrements the ref count for pd_entry
 * assumptions:
 *	1. caller can deallocate the memory used by pd after this function
 *	   returns.
 **/
STATIC enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in sd table (for direct address mode) invalid
 *	2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
 *	   set to 0) and PMSDDATAHIGH to invalidate the sd page
 *	3. Decrements the ref count for the sd_entry
 * assumptions:
 *	1. caller can deallocate the memory used by backing storage after this
 *	   function returns.
 **/
STATIC enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = false;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This is to cover for cases where you may not want to have an SD with
	 * the full 2M memory but something smaller. By not filling out any
	 * size, the function will default the SD size to be 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (I40E_SUCCESS != ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = true;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}
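
/* Typical PF bring-up order implied by the function headers in this file
 * (a sketch, not code from this file; the queue counts are illustrative):
 *
 *	ret = i40e_init_lan_hmc(hw, 64, 64, 0, 0);
 *	if (ret == I40E_SUCCESS)
 *		ret = i40e_configure_lan_hmc(hw,
 *					     I40E_HMC_MODEL_DIRECT_PREFERRED);
 *
 * Only after both calls succeed may queue contexts be programmed with the
 * i40e_set_lan_*_queue_context() functions further below.
 */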

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs. It frees
 * the memory for PDs and backing storage. After this function returns,
 * the caller should deallocate the memory previously allocated for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field          Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};
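
/* How one descriptor above maps onto the raw context image (a worked
 * example of the arithmetic used by the i40e_write_*()/i40e_read_*()
 * helpers below): the Tx "qlen" field has width 13 and lsb 33 + 128 = 161,
 * so it starts in byte 161 / 8 = 20 of the context at bit offset
 * 161 % 8 = 1; assuming qlen is a two-byte struct field (size_of == 2),
 * it is copied with the word-sized helpers.
 */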

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					     /* Field          Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),          13,    0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),          8,    13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),          57,    32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),          13,    89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),          7,    102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),          5,    109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),          2,    114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),          1,    116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),       1,    117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),         1,    118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),         1,    119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),       4,    120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),       2,    124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),         1,    127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),         14,    174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena),   1,    193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena),   1,    194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),    1,    195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),    1,    196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),     3,    198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),        1,    201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
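
/* Read-modify-write in miniature: for the Tx "new_context" field above
 * (width 1, lsb 30), shift_width = 30 % 8 = 6, mask = 0x01 << 6 = 0x40 and
 * dest = hmc_bits + 30 / 8 = hmc_bits + 3, so only bit 6 of context byte 3
 * is replaced while the surrounding bits are preserved.
 */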

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	/* keep only the bits of this field, then shift them into place */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw: the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be read from
 * @dest: the struct whose values are written into the context
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer to receive the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer. This function is used for LAN Queue contexts.
 **/
STATIC
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
		goto exit;
	}
	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
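
/* The address math above, worked through (sizes are illustrative; the
 * paged backing-page size is assumed to be 4 KB): for a 32-byte Rx context
 * at obj_idx = 200 with base 0, obj_offset_in_fpm = 0 + 32 * 200 = 6400.
 * In a paged SD that lands in the second 4 KB page, at obj_offset_in_pd =
 * 6400 % 4096 = 2304; in a direct SD it is simply offset 6400 into the one
 * large backing page.
 */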

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values to program
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_TX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw: the hardware struct
 * @queue: the queue we care about
 * @s: the struct containing the context values to program
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes,
				     I40E_HMC_LAN_RX, queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}