drivers/infiniband/hw/i40iw/i40iw_pble.c
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);

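/*
 * Pbles (physical buffer list entries) are 8-byte entries carved out of the
 * HMC function-private memory (FPM) space reserved for the I40IW_HMC_IW_PBLE
 * object.  That FPM range is backed with host memory one chunk at a time: a
 * single DMA-coherent 2M backing page when a direct sd can be used (PF only),
 * or individually DMA-mapped 4K pages from a vmalloc buffer otherwise.  A
 * genalloc pool hands pbles out by kernel virtual address and maps them back
 * to fpm addresses, so an allocation can be converted to a pble index for the
 * hardware.
 */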
/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct list_head *clist;
        struct list_head *tlist;
        struct i40iw_chunk *chunk;
        struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

        if (pinfo->pool) {
                list_for_each_safe(clist, tlist, &pinfo->clist) {
                        chunk = list_entry(clist, struct i40iw_chunk, list);
                        if (chunk->type == I40IW_VMALLOC)
                                i40iw_free_vmalloc_mem(dev->hw, chunk);
                        kfree(chunk);
                }
                gen_pool_destroy(pinfo->pool);
        }
}

/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_info *hmc_info;
        u32 fpm_idx = 0;

        hmc_info = dev->hmc_info;
        pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
        /* Now start the pbles on a 4K boundary */
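        /*
         * fpm addresses are byte offsets and each pble is 8 bytes, so skip
         * enough 8-byte entries to round the base up to the next 4K page
         */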
        if (pble_rsrc->fpm_base_addr & 0xfff)
                fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

        pble_rsrc->unallocated_pble =
                hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
        pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

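        /*
         * POOL_SHIFT is the pool's minimum allocation order, i.e. the
         * granularity in bytes (as a power of two) of gen_pool_alloc()
         */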
        pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
        pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
        INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
        if (!pble_rsrc->pinfo.pool)
                goto error;

        if (add_pble_pool(dev, pble_rsrc))
                goto error;

        return 0;

error:
        i40iw_destroy_pble_pool(dev, pble_rsrc);
        return I40IW_ERR_NO_MEMORY;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                 struct sd_pd_idx *idx)
{
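        /*
         * An sd spans I40IW_HMC_DIRECT_BP_SIZE (2M) of backing memory and a
         * pd spans I40IW_HMC_PAGED_BP_SIZE (4K): sd_idx picks the 2M
         * segment, pd_idx the 4K page overall, and rel_pd_idx the page
         * within the sd (0 to I40IW_HMC_PD_CNT_IN_SD - 1)
         */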
        idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
        idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
        idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_add_page_info *info)
{
        enum i40iw_status_code ret_code = 0;
        struct sd_pd_idx *idx = &info->idx;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        u32 offset = 0;

        if (!sd_entry->valid) {
                if (dev->is_pf) {
                        ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                                            info->idx.sd_idx,
                                                            I40IW_SD_TYPE_DIRECT,
                                                            I40IW_HMC_DIRECT_BP_SIZE);
                        if (ret_code)
                                return ret_code;
                        chunk->type = I40IW_DMA_COHERENT;
                }
        }
        offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
                    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
        return 0;
}

/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        int i;

        if (!chunk->pg_cnt)
                goto done;
        for (i = 0; i < chunk->pg_cnt; i++)
                dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
        kfree(chunk->dmaaddrs);
        chunk->dmaaddrs = NULL;
        vfree(chunk->vaddr);
        chunk->vaddr = NULL;
        chunk->type = 0;
}

/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hw struct
 * @chunk: chunk to add
 * @pg_cnt: number of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
                                                    struct i40iw_chunk *chunk,
                                                    int pg_cnt)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        struct page *page;
        u8 *addr;
        u32 size;
        int i;

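        /*
         * the buffer is vmalloc'd and therefore only virtually contiguous,
         * so every 4K page gets DMA-mapped on its own; dmaaddrs holds one
         * dma_addr_t per page, hence the pg_cnt << 3 (8 bytes each) sizing
         */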
        chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
        if (!chunk->dmaaddrs)
                return I40IW_ERR_NO_MEMORY;
        size = PAGE_SIZE * pg_cnt;
        chunk->vaddr = vmalloc(size);
        if (!chunk->vaddr) {
                kfree(chunk->dmaaddrs);
                chunk->dmaaddrs = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        chunk->size = size;
        addr = (u8 *)chunk->vaddr;
        for (i = 0; i < pg_cnt; i++) {
                page = vmalloc_to_page((void *)addr);
                if (!page)
                        break;
                chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
                                                  PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
                        break;
                addr += PAGE_SIZE;
        }

        chunk->pg_cnt = i;
        chunk->type = I40IW_VMALLOC;
        if (i == pg_cnt)
                return 0;

        i40iw_free_vmalloc_mem(hw, chunk);
        return I40IW_ERR_NO_MEMORY;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
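        /* fpm addresses are byte offsets and each pble is 8 bytes wide */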
        return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}

/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                           struct i40iw_add_page_info *info)
{
        u8 *addr;
        struct i40iw_dma_mem mem;
        struct i40iw_hmc_pd_entry *pd_entry;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_manage_vf_pble_info vf_pble_info;
        enum i40iw_status_code status = 0;
        u32 rel_pd_idx = info->idx.rel_pd_idx;
        u32 pd_idx = info->idx.pd_idx;
        u32 i;

        status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
        if (status)
                return I40IW_ERR_NO_MEMORY;
        status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                          info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
                                          I40IW_HMC_DIRECT_BP_SIZE);
        if (status) {
                i40iw_free_vmalloc_mem(dev->hw, chunk);
                return status;
        }
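        /*
         * a VF cannot program HMC backing pages directly; it asks the PF
         * over the virtual channel to add the pble objects for this range
         * (info->pages << PBLE_512_SHIFT converts pages to a pble count)
         */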
        if (!dev->is_pf) {
                status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
                                                     fpm_to_idx(pble_rsrc,
                                                                pble_rsrc->next_fpm_addr),
                                                     (info->pages << PBLE_512_SHIFT));
                if (status) {
                        i40iw_pr_err("failed to allocate PBLEs in the PF. Error %i\n", status);
                        i40iw_free_vmalloc_mem(dev->hw, chunk);
                        return status;
                }
        }
        addr = chunk->vaddr;
        for (i = 0; i < info->pages; i++) {
                mem.pa = chunk->dmaaddrs[i];
                mem.size = PAGE_SIZE;
                mem.va = (void *)(addr);
                pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
                if (!pd_entry->valid) {
                        status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
                        if (status)
                                goto error;
                        addr += PAGE_SIZE;
                } else {
                        i40iw_pr_err("pd entry is valid; expected invalid\n");
                }
        }
        if (!dev->is_pf) {
                vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
                vf_pble_info.inv_pd_ent = false;
                vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
                vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
                vf_pble_info.sd_index = info->idx.sd_idx;
                status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
                                                    &vf_pble_info, true);
                if (status) {
                        i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
                        goto error;
                }
        }
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        return 0;
error:
        i40iw_free_vmalloc_mem(dev->hw, chunk);
        return status;
}

/**
 * add_pble_pool - add a sd entry for pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_sd_entry *sd_entry;
        struct i40iw_hmc_info *hmc_info;
        struct i40iw_chunk *chunk;
        struct i40iw_add_page_info info;
        struct sd_pd_idx *idx = &info.idx;
        enum i40iw_status_code ret_code = 0;
        enum i40iw_sd_entry_type sd_entry_type;
        u64 sd_reg_val = 0;
        u32 pages;

        if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
                return I40IW_ERR_NO_MEMORY;
        if (pble_rsrc->next_fpm_addr & 0xfff) {
                i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
                return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
        }
        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return I40IW_ERR_NO_MEMORY;
        hmc_info = dev->hmc_info;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        get_sd_pd_idx(pble_rsrc, idx);
        sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
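        /*
         * take either the pds remaining in the current sd or a full sd's
         * worth, capped by how many whole 4K pages the unallocated pbles
         * can still fill (512 pbles per page)
         */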
        pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
                        idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
        pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
        info.chunk = chunk;
        info.hmc_info = hmc_info;
        info.pages = pages;
        info.sd_entry = sd_entry;
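        /*
         * prefer a direct sd (one 2M DMA-coherent backing page) when the
         * chunk starts on an sd boundary, covers the entire sd and we are
         * the PF; otherwise fall back to a paged sd of individually mapped
         * 4K pages
         */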
        if (!sd_entry->valid) {
                sd_entry_type = (!idx->rel_pd_idx &&
                                 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
                                 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
        } else {
                sd_entry_type = sd_entry->entry_type;
        }
        i40iw_debug(dev, I40IW_DEBUG_PBLE,
                    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
                    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
                    sd_entry_type, sd_entry->valid);

        if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
                ret_code = add_sd_direct(dev, pble_rsrc, &info);
        if (ret_code)
                sd_entry_type = I40IW_SD_TYPE_PAGED;
        else
                pble_rsrc->stats_direct_sds++;

        if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
                ret_code = add_bp_pages(dev, pble_rsrc, &info);
                if (ret_code)
                        goto error;
                else
                        pble_rsrc->stats_paged_sds++;
        }

        if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
                              (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
                i40iw_pr_err("could not add memory to pool via gen_pool_add_virt()\n");
                ret_code = I40IW_ERR_NO_MEMORY;
                goto error;
        }
        pble_rsrc->next_fpm_addr += chunk->size;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
                    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (chunk->size >> 3);
        list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
                        sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
        if (sd_entry->valid)
                return 0;
        if (dev->is_pf) {
                ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
                                            sd_reg_val, idx->sd_idx,
                                            sd_entry->entry_type, true);
                if (ret_code) {
                        i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
                        goto error;
                }
        }

        sd_entry->valid = true;
        return 0;
error:
        kfree(chunk);
        return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                      struct i40iw_pble_alloc *palloc)
{
        u32 i;
        struct gen_pool *pool;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf = lvl2->leaf;

        pool = pble_rsrc->pinfo.pool;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                if (leaf->addr)
                        gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
                else
                        break;
        }

        if (root->addr)
                gen_pool_free(pool, root->addr, (root->cnt << 3));

        kfree(lvl2->leaf);
        lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc,
                                            struct gen_pool *pool)
{
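        /*
         * a level 2 allocation is a root pble page whose entries are the
         * indexes of leaf pble pages; each leaf addresses up to 512
         * (PBLE_PER_PAGE) pbles, and the root fans the request out across
         * as many leaves as total_cnt requires
         */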
        u32 lf4k, lflast, total, i;
        u32 pblcnt = PBLE_PER_PAGE;
        u64 *addr;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf;

        /* number of full 512-pble (4K) leaves */
        lf4k = palloc->total_cnt >> 9;
        lflast = palloc->total_cnt % PBLE_PER_PAGE;
        total = (lflast == 0) ? lf4k : lf4k + 1;
        lvl2->leaf_cnt = total;

        leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
        if (!leaf)
                return I40IW_ERR_NO_MEMORY;
        lvl2->leaf = leaf;
        /* allocate pbles for the root */
        root->addr = gen_pool_alloc(pool, (total << 3));
        if (!root->addr) {
                kfree(lvl2->leaf);
                lvl2->leaf = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        root->idx = fpm_to_idx(pble_rsrc,
                               (u64)gen_pool_virt_to_phys(pool, root->addr));
        root->cnt = total;
        addr = (u64 *)root->addr;
        for (i = 0; i < total; i++, leaf++) {
                pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
                leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
                if (!leaf->addr)
                        goto error;
                leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
                leaf->cnt = pblcnt;
                *addr = (u64)leaf->idx;
                addr++;
        }
        palloc->level = I40IW_LEVEL_2;
        pble_rsrc->stats_lvl2++;
        return 0;
error:
        free_lvl2(pble_rsrc, palloc);
        return I40IW_ERR_NO_MEMORY;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc)
{
        u64 *addr;
        struct gen_pool *pool;
        struct i40iw_pble_info *lvl1 = &palloc->level1;

        pool = pble_rsrc->pinfo.pool;
        addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));

        if (!addr)
                return I40IW_ERR_NO_MEMORY;

        palloc->level = I40IW_LEVEL_1;
        lvl1->addr = (unsigned long)addr;
        lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
                               (unsigned long)addr));
        lvl1->cnt = palloc->total_cnt;
        pble_rsrc->stats_lvl1++;
        return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
                                                        struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                                        struct i40iw_pble_alloc *palloc,
                                                        struct gen_pool *pool)
{
        enum i40iw_status_code status = 0;

        status = get_lvl1_pble(dev, pble_rsrc, palloc);
        if (status && (palloc->total_cnt > PBLE_PER_PAGE))
                status = get_lvl2_pble(pble_rsrc, palloc, pool);
        return status;
}

/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: number of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
                                      struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                      struct i40iw_pble_alloc *palloc,
                                      u32 pble_cnt)
{
        struct gen_pool *pool;
        enum i40iw_status_code status = 0;
        u32 max_sds = 0;
        int i;

        pool = pble_rsrc->pinfo.pool;
        palloc->total_cnt = pble_cnt;
        palloc->level = I40IW_LEVEL_0;
        /* check first to see if we can get pbles without acquiring additional sds */
        status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
        if (!status)
                goto exit;
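        /*
         * one sd backs 2M of pbles, i.e. 2^18 8-byte entries, so at most
         * (total_cnt >> 18) + 1 additional pools are needed to cover the
         * request
         */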
        max_sds = (palloc->total_cnt >> 18) + 1;
        for (i = 0; i < max_sds; i++) {
                status = add_pble_pool(dev, pble_rsrc);
                if (status)
                        break;
                status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
                if (!status)
                        break;
        }
exit:
        if (!status)
                pble_rsrc->stats_alloc_ok++;
        else
                pble_rsrc->stats_alloc_fail++;

        return status;
}

/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                     struct i40iw_pble_alloc *palloc)
{
        struct gen_pool *pool;

        pool = pble_rsrc->pinfo.pool;
        if (palloc->level == I40IW_LEVEL_2)
                free_lvl2(pble_rsrc, palloc);
        else
                gen_pool_free(pool, palloc->level1.addr,
                              (palloc->level1.cnt << 3));
        pble_rsrc->stats_alloc_freed++;
}