/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define DMA_ADDR_T_SHIFT	12
#define BT_BA_SHIFT		32

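/*
 * hns_roce_check_whether_mhop - check whether a table type uses multi-hop
 * addressing on this device, i.e. whether the hop number configured for
 * that table in the device capabilities is non-zero.
 */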
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
	    (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
	    (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
	    (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
	    (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
	    (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);

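/*
 * The two helpers below report whether a whole window of HEM chunk slots
 * (or of base address table slots) is empty, which is the condition for
 * freeing the base address table chunk that covers that window.
 */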
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
				    u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (hem[start_idx + i])
			return false;

	return true;
}

static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (bt[start_idx + i])
			return false;

	return true;
}

static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}

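/*
 * hns_roce_calc_hem_mhop - fill in the multi-hop parameters (buffer and
 * base address table chunk sizes, hop number, number of level-0 base
 * addresses) for a table and, when @obj is non-NULL, translate the object
 * index into its L0/L1/L2 indexes within the base address hierarchy.
 */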
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 table_idx;
	u32 bt_num;
	u32 chunk_size;

	switch (table->type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
		mhop->hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_MTT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.mtt_hop_num;
		break;
	case HEM_TYPE_CQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.cqe_hop_num;
		break;
	case HEM_TYPE_SRQWQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
		break;
	case HEM_TYPE_IDX:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.idx_hop_num;
		break;
	default:
		dev_err(dev, "Table %d does not support multi-hop addressing!\n",
			table->type);
		return -EINVAL;
	}

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC/SCCC tables allocate HEM for buffer pages;
	 * MTT/CQE tables allocate HEM for base address table (BT) pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / 8;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
		     (chunk_size / table->obj_size);
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);

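/*
 * hns_roce_alloc_hem - allocate a HEM made up of DMA-coherent chunks.
 * @npages worth of memory is requested as high-order allocations; if any
 * allocation fails, the whole HEM is torn down and NULL is returned.
 */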
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Allocate the memory in one shot. If that fails, do not
		 * fall back to smaller allocations; return failure directly.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
					  sg_dma_len(&chunk->mem[i]),
					  chunk->buf[i],
					  sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}

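/*
 * hns_roce_set_hem - program the base address of a HEM chunk into hardware
 * through the ROCEE_BT_CMD registers, busy-waiting for any in-flight
 * hardware sync to finish before issuing the write.
 */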
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	unsigned long flags;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	u32 bt_cmd_h_val = 0;
	u32 bt_cmd_val[2];
	u32 bt_cmd_l = 0;
	u64 bt_ba = 0;
	int ret = 0;

	/* Find the HEM (Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		break;
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_SRQC);
		break;
	default:
		return ret;
	}
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently the iterator only walks a single chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
		while (1) {
			if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
				if (!(time_before(jiffies, end))) {
					dev_err(dev, "Write bt_cmd failed, hw_sync is not zero.\n");
					spin_unlock_irqrestore(lock, flags);
					return -EBUSY;
				}
			} else {
				break;
			}
			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
		}

		bt_cmd_l = (u32)bt_ba;
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h_val;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}

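/*
 * hns_roce_table_mhop_get - take a reference on the HEM chunk backing @obj
 * in a multi-hop table, allocating the chunk and any missing intermediate
 * base address tables (L0/L1) and programming them into hardware on first
 * use.
 */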
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem_iter iter;
	u32 buf_chunk_size;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 size;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	u64 bt_l0_idx = 0;
	u64 bt_ba;
	unsigned long mhop_obj = obj;
	int bt_l1_allocated = 0;
	int bt_l0_allocated = 0;
	int step_idx;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return ret;

	buf_chunk_size = mhop.buf_chunk_size;
	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return -EINVAL;
	}

	mutex_lock(&table->mutex);

	if (table->hem[hem_idx]) {
		++table->hem[hem_idx]->refcount;
		goto out;
	}

	/* Allocate the L0 table chunk, which holds the L1 base addresses */
	if ((check_whether_bt_num_3(table->type, hop_num) ||
	     check_whether_bt_num_2(table->type, hop_num)) &&
	     !table->bt_l0[bt_l0_idx]) {
		table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l0_dma_addr[bt_l0_idx]),
					    GFP_KERNEL);
		if (!table->bt_l0[bt_l0_idx]) {
			ret = -ENOMEM;
			goto out;
		}
		bt_l0_allocated = 1;

		/* set base address to hardware */
		if (table->type < HEM_TYPE_MTT) {
			step_idx = 0;
			if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
				ret = -ENODEV;
				dev_err(dev, "set HEM base address to HW failed!\n");
				goto err_dma_alloc_l1;
			}
		}
	}

	/* Allocate the L1 table chunk, which holds the L2 base addresses */
	if (check_whether_bt_num_3(table->type, hop_num) &&
	    !table->bt_l1[bt_l1_idx])  {
		table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l1_dma_addr[bt_l1_idx]),
					    GFP_KERNEL);
		if (!table->bt_l1[bt_l1_idx]) {
			ret = -ENOMEM;
			goto err_dma_alloc_l1;
		}
		bt_l1_allocated = 1;
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
					    table->bt_l1_dma_addr[bt_l1_idx];

		/* set base address to hardware */
		step_idx = 1;
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_alloc_hem_buf;
		}
	}

	/*
	 * Allocate a buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC,
	 * or a BT space chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
	table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
						size >> PAGE_SHIFT,
						size,
						(table->lowmem ? GFP_KERNEL :
						GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[hem_idx]) {
		ret = -ENOMEM;
		goto err_alloc_hem_buf;
	}

	hns_roce_hem_first(table->hem[hem_idx], &iter);
	bt_ba = hns_roce_hem_addr(&iter);

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == 2) {
			*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
			step_idx = 2;
		} else if (hop_num == 1) {
			*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
			step_idx = 1;
		} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
			step_idx = 0;
		} else {
			ret = -EINVAL;
			goto err_dma_alloc_l1;
		}

		/* set HEM base address to hardware */
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_alloc_hem_buf;
		}
	} else if (hop_num == 2) {
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
	}

	++table->hem[hem_idx]->refcount;
	goto out;

err_alloc_hem_buf:
	if (bt_l1_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
				  table->bt_l1_dma_addr[bt_l1_idx]);
		table->bt_l1[bt_l1_idx] = NULL;
	}

err_dma_alloc_l1:
	if (bt_l0_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
				  table->bt_l0_dma_addr[bt_l0_idx]);
		table->bt_l0[bt_l0_idx] = NULL;
	}

out:
	mutex_unlock(&table->mutex);
	return ret;
}

int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	int ret = 0;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set the HEM base address (physical address, 128KB chunks) in hardware */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_table_get);

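/*
 * hns_roce_table_mhop_put - drop a reference on the HEM chunk backing @obj
 * in a multi-hop table and, once the chunk (and all sibling chunks under
 * the same base address table) is gone, clear the hardware base addresses
 * and free the now-empty intermediate tables.
 */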
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	unsigned long mhop_obj = obj;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 start_idx;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return;

	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return;
	}

	mutex_lock(&table->mutex);

	if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
		mutex_unlock(&table->mutex);
		return;
	}

	if (table->type < HEM_TYPE_MTT && hop_num == 1) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT &&
		   hop_num == HNS_ROCE_HOP_NUM_0) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");
	}

	/*
	 * Free the buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC,
	 * or the BT space chunk for MTT/CQE.
	 */
	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
	table->hem[hem_idx] = NULL;

	if (check_whether_bt_num_2(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (table->type < HEM_TYPE_MTT &&
			    hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l0[mhop.l0_idx],
					  table->bt_l0_dma_addr[mhop.l0_idx]);
			table->bt_l0[mhop.l0_idx] = NULL;
		}
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			    mhop.l1_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l1[bt_l1_idx],
					  table->bt_l1_dma_addr[bt_l1_idx]);
			table->bt_l1[bt_l1_idx] = NULL;

			start_idx = mhop.l0_idx * chunk_ba_num;
			if (hns_roce_check_bt_null(table->bt_l1, start_idx,
						   chunk_ba_num)) {
				if (hr_dev->hw->clear_hem(hr_dev, table, obj,
							  0))
					dev_warn(dev, "Clear HEM base address failed.\n");

				dma_free_coherent(dev, bt_chunk_size,
					    table->bt_l0[mhop.l0_idx],
					    table->bt_l0_dma_addr[mhop.l0_idx]);
				table->bt_l0[mhop.l0_idx] = NULL;
			}
		}
	}

	mutex_unlock(&table->mutex);
}

void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = (obj & (table->num_obj - 1)) /
	    (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(hns_roce_table_put);

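/*
 * hns_roce_table_find - return the kernel virtual address of the entry for
 * @obj in a lowmem table, optionally reporting its DMA address through
 * @dma_handle. Returns NULL if the table is not in lowmem or the entry's
 * chunk has not been allocated.
 */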
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	void *addr = NULL;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	int length;
	int i, j;
	u32 hem_idx = 0;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
		/* mtt mhop */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = (obj & (table->num_obj - 1)) *
				       table->obj_size % mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);

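/*
 * hns_roce_table_get_range - take references on every HEM chunk covering
 * objects [start, end], stepping one chunk at a time and unwinding already
 * acquired chunks on failure.
 */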
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;
	int ret;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	/* Allocate MTT entry memory chunk by chunk (128KB chunks) */
	for (i = start; i <= end; i += inc) {
		ret = hns_roce_table_get(hr_dev, table, i);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		hns_roce_table_put(hr_dev, table, i);
	}
	return ret;
}

void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	for (i = start; i <= end; i += inc)
		hns_roce_table_put(hr_dev, table, i);
}

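/*
 * hns_roce_init_hem_table - initialise the bookkeeping for a HEM table:
 * choose the chunk geometry for the table type, then allocate the hem
 * pointer array plus, for multi-hop tables, the L0/L1 base address arrays.
 */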
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	struct device *dev = hr_dev->dev;
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0 = 0;
		u32 hop_num;

		switch (type) {
		case HEM_TYPE_QPC:
			buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.qpc_bt_num;
			hop_num = hr_dev->caps.qpc_hop_num;
			break;
		case HEM_TYPE_MTPT:
			buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.mpt_bt_num;
			hop_num = hr_dev->caps.mpt_hop_num;
			break;
		case HEM_TYPE_CQC:
			buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.cqc_bt_num;
			hop_num = hr_dev->caps.cqc_hop_num;
			break;
		case HEM_TYPE_SCCC:
			buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.sccc_bt_num;
			hop_num = hr_dev->caps.sccc_hop_num;
			break;
		case HEM_TYPE_QPC_TIMER:
			buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
			hop_num = hr_dev->caps.qpc_timer_hop_num;
			break;
		case HEM_TYPE_CQC_TIMER:
			buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
			hop_num = hr_dev->caps.cqc_timer_hop_num;
			break;
		case HEM_TYPE_SRQC:
			buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.srqc_bt_num;
			hop_num = hr_dev->caps.srqc_hop_num;
			break;
		case HEM_TYPE_MTT:
			buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.mtt_hop_num;
			break;
		case HEM_TYPE_CQE:
			buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.cqe_hop_num;
			break;
		case HEM_TYPE_SRQWQE:
			buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.srqwqe_hop_num;
			break;
		case HEM_TYPE_IDX:
			buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.idx_hop_num;
			break;
		default:
			dev_err(dev,
				"Table %d does not support hem table init here!\n",
				type);
			return -EINVAL;
		}
		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / 8;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
				     bt_chunk_num;
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						 sizeof(*table->bt_l1_dma_addr),
						 GFP_KERNEL);

			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						 sizeof(*table->bt_l0_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}

static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	int i;
	u64 obj;

	hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
					mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}

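/*
 * hns_roce_cleanup_hem_table - free every HEM chunk still held by a table,
 * clearing the corresponding hardware base addresses, then release the
 * table's bookkeeping arrays.
 */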
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hr_dev->hw->clear_hem(hr_dev, table,
				i * table->table_chunk_size / table->obj_size, 0))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}

void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.num_idx_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_idx_table);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_srqwqe_table);
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->cqc_timer_table);
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}