/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
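
/*
 * The two handlers below are installed as hr_cq->comp and hr_cq->event in
 * hns_roce_ib_create_cq(); the EQ interrupt path reaches them through
 * hns_roce_cq_completion() and hns_roce_cq_event() at the bottom of this
 * file.
 */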

static void hns_roce_ib_cq_comp(struct hns_roce_cq *hr_cq)
{
	struct ib_cq *ibcq = &hr_cq->ib_cq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
				 enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev;
	struct ib_event event;
	struct ib_cq *ibcq;

	ibcq = &hr_cq->ib_cq;
	hr_dev = to_hr_dev(ibcq->device);

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(hr_dev->dev,
			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
			event_type, hr_cq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
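
/*
 * A CQ context changes ownership between driver ("SW") and hardware ("HW")
 * via mailbox commands: SW2HW_CQ installs the context filled in by
 * hw->write_cqc(), and HW2SW_CQ below reclaims it at destroy time.
 */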

static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, cq_num, 0,
				 HNS_ROCE_CMD_SW2HW_CQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
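
/*
 * hns_roce_cq_alloc() performs its setup in order: look up the MTT segment
 * backing the CQE buffer, reserve a CQN from the bitmap, get CQC table
 * memory, publish the CQ in the radix tree, then issue SW2HW_CQ through a
 * mailbox. The error labels at the bottom unwind those steps in reverse.
 */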

static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
			     struct hns_roce_mtt *hr_mtt,
			     struct hns_roce_uar *hr_uar,
			     struct hns_roce_cq *hr_cq, int vector)
{
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_hem_table *mtt_table;
	struct hns_roce_cq_table *cq_table;
	struct device *dev = hr_dev->dev;
	dma_addr_t dma_handle;
	u64 *mtts;
	int ret;

	cq_table = &hr_dev->cq_table;

	/* Get the physical address of the cq buf */
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt_table = &hr_dev->mr_table.mtt_cqe_table;
	else
		mtt_table = &hr_dev->mr_table.mtt_table;

	mtts = hns_roce_table_find(hr_dev, mtt_table,
				   hr_mtt->first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "CQ alloc: failed to find cq buf addr.\n");
		return -EINVAL;
	}

	if (vector >= hr_dev->caps.num_comp_vectors) {
		dev_err(dev, "CQ alloc: invalid vector.\n");
		return -EINVAL;
	}
	hr_cq->vector = vector;

	ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to alloc index.\n");
		return -ENOMEM;
	}

	/* Get CQC memory from the HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to get context mem.\n");
		goto err_out;
	}

	/* Insert the cq into the radix tree, keyed by cqn */
	spin_lock_irq(&cq_table->lock);
	ret = radix_tree_insert(&cq_table->tree, hr_cq->cqn, hr_cq);
	spin_unlock_irq(&cq_table->lock);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to radix_tree_insert.\n");
		goto err_put;
	}

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_radix;
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle,
			      nent, vector);

	/* Send mailbox to hw */
	ret = hns_roce_sw2hw_cq(hr_dev, mailbox, hr_cq->cqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		dev_err(dev, "CQ alloc: failed to cmd mailbox.\n");
		goto err_radix;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	hr_cq->uar = hr_uar;

	atomic_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

err_out:
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
	return ret;
}
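
/*
 * When called with a NULL mailbox (as hns_roce_free_cq() does below), no
 * output buffer is supplied and op_modifier is 1 instead of 0, evidently so
 * the firmware can skip returning the retired CQ context.
 */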

static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
			     struct hns_roce_cmd_mailbox *mailbox,
			     unsigned long cq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_HW2SW_CQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
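
/*
 * hns_roce_free_cq() drops the initial reference taken in
 * hns_roce_cq_alloc() and then waits until any event handler that raced
 * with destroy (see hns_roce_cq_event()) has dropped its own reference.
 */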

void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
	if (ret)
		dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	/* Wait until the interrupt handlers have finished with this CQ */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* Wait for all outstanding references to be dropped */
	if (atomic_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, hr_cq->cqn);
	spin_unlock_irq(&cq_table->lock);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
	hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
}
EXPORT_SYMBOL_GPL(hns_roce_free_cq);
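
/*
 * For user CQs the CQE buffer lives in user memory: pin it, then describe
 * it to hardware through an MTT. When cqe_buf_pg_sz is nonzero the device
 * uses pages of order (PAGE_SHIFT + cqe_buf_pg_sz), so the pinned page
 * count is rounded up to that larger granularity before the MTT is
 * initialized.
 */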

static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
				   struct ib_udata *udata,
				   struct hns_roce_cq_buf *buf,
				   struct ib_umem **umem, u64 buf_addr,
				   int cqe)
{
	u32 page_shift;
	u32 npages;
	int ret;

	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
	else
		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

	if (hr_dev->caps.cqe_buf_pg_sz) {
		npages = (ib_umem_page_count(*umem) +
			  (1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
			 (1 << hr_dev->caps.cqe_buf_pg_sz);
		page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
		ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
					&buf->hr_mtt);
	} else {
		ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
					(*umem)->page_shift, &buf->hr_mtt);
	}
	if (ret)
		goto err_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &buf->hr_mtt, *umem);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	ib_umem_release(*umem);
	return ret;
}

static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, u32 nent)
{
	u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
	int ret;

	ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
				 (1 << page_shift) * 2, &buf->hr_buf,
				 page_shift);
	if (ret)
		goto out;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
	else
		buf->hr_mtt.mtt_type = MTT_TYPE_WQE;

	ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
				buf->hr_buf.page_shift, &buf->hr_mtt);
	if (ret)
		goto err_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &buf->hr_mtt, &buf->hr_buf);
	if (ret)
		goto err_mtt;

	return 0;

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &buf->hr_mtt);

err_buf:
	hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);

out:
	return ret;
}
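
/*
 * ib_cq.cqe stores the usable depth (entries - 1, set in
 * hns_roce_ib_create_cq() below), so the free path passes cqe + 1 to
 * release the full allocation.
 */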

static void hns_roce_ib_free_cq_buf(struct hns_roce_dev *hr_dev,
				    struct hns_roce_cq_buf *buf, int cqe)
{
	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
			  &buf->hr_buf);
}
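
/*
 * Verbs entry point for CQ creation. A minimal kernel consumer would reach
 * it roughly like this (sketch; my_comp_handler and my_ctx are hypothetical
 * names):
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 64, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(ibdev, my_comp_handler, NULL,
 *					my_ctx, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */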

struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *context,
				    struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct hns_roce_ib_create_cq ucmd;
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = NULL;
	struct hns_roce_uar *uar = NULL;
	int vector = attr->comp_vector;
	int cq_entries = attr->cqe;
	int ret;

	if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
		dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
			cq_entries, hr_dev->caps.max_cqes);
		return ERR_PTR(-EINVAL);
	}

	hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
	if (!hr_cq)
		return ERR_PTR(-ENOMEM);

	if (hr_dev->caps.min_cqes)
		cq_entries = max(cq_entries, hr_dev->caps.min_cqes);

	cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1;
	spin_lock_init(&hr_cq->lock);

	if (context) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			dev_err(dev, "Failed to copy_from_udata.\n");
			ret = -EFAULT;
			goto err_cq;
		}

		/* Get the user space address, write it into the mtt table */
		ret = hns_roce_ib_get_cq_umem(hr_dev, udata, &hr_cq->hr_buf,
					      &hr_cq->umem, ucmd.buf_addr,
					      cq_entries);
		if (ret) {
			dev_err(dev, "Failed to get_cq_umem.\n");
			goto err_cq;
		}

		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		    (udata->outlen >= sizeof(resp))) {
			ret = hns_roce_db_map_user(to_hr_ucontext(context),
						   udata, ucmd.db_addr,
						   &hr_cq->db);
			if (ret) {
				dev_err(dev, "cq record doorbell map failed!\n");
				goto err_mtt;
			}
			hr_cq->db_en = 1;
			resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
		}

		/* Get user space parameters */
		uar = &to_hr_ucontext(context)->uar;
	} else {
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
			ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (ret)
				goto err_cq;

			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->db_en = 1;
		}

		/* Init the mtt table and write the buf address into it */
		ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
					       cq_entries);
		if (ret) {
			dev_err(dev, "Failed to alloc_cq_buf.\n");
			goto err_db;
		}

		uar = &hr_dev->priv_uar;
		hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
				 DB_REG_OFFSET * uar->index;
	}

	/* Allocate the cq index, fill the cq_context */
	ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
				uar, hr_cq, vector);
	if (ret) {
		dev_err(dev, "Create CQ failed in cq_alloc.\n");
		goto err_dbmap;
	}

	/*
	 * For CQs created in kernel space, the tptr value should be
	 * initialized to zero. For CQs created in user space, zeroing it
	 * here would cause synchronization problems, so user space
	 * initializes it instead.
	 */
	if (!context && hr_cq->tptr_addr)
		*hr_cq->tptr_addr = 0;

	/* Install the handlers invoked from the EQ dispatch paths */
	hr_cq->comp = hns_roce_ib_cq_comp;
	hr_cq->event = hns_roce_ib_cq_event;
	hr_cq->cq_depth = cq_entries;

	if (context) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (ret)
			goto err_cqc;
	}

	return &hr_cq->ib_cq;

err_cqc:
	hns_roce_free_cq(hr_dev, hr_cq);

err_dbmap:
	if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
	    (udata->outlen >= sizeof(resp)))
		hns_roce_db_unmap_user(to_hr_ucontext(context),
				       &hr_cq->db);

err_mtt:
	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
	if (context)
		ib_umem_release(hr_cq->umem);
	else
		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
					hr_cq->ib_cq.cqe);

err_db:
	if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
		hns_roce_free_db(hr_dev, &hr_cq->db);

err_cq:
	kfree(hr_cq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
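
/*
 * If the hardware layer supplies its own destroy_cq hook, it owns the whole
 * teardown; otherwise the generic path below unwinds what create_cq set up,
 * splitting on whether the CQ buffer came from user memory (uobject set) or
 * from a kernel allocation.
 */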

int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	int ret = 0;

	if (hr_dev->hw->destroy_cq) {
		ret = hr_dev->hw->destroy_cq(ib_cq);
	} else {
		hns_roce_free_cq(hr_dev, hr_cq);
		hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

		if (ib_cq->uobject) {
			ib_umem_release(hr_cq->umem);

			if (hr_cq->db_en == 1)
				hns_roce_db_unmap_user(
					to_hr_ucontext(ib_cq->uobject->context),
					&hr_cq->db);
		} else {
			/* Free the buffer of the stored cq */
			hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
						ib_cq->cqe);
			if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
				hns_roce_free_db(hr_dev, &hr_cq->db);
		}

		kfree(hr_cq);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
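
/*
 * EQ dispatch: the helpers below translate a hardware-reported cqn into the
 * driver CQ via the radix tree. The lookups mask cqn with (num_cqs - 1),
 * which assumes num_cqs is a power of two.
 */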

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (!cq) {
		dev_warn(dev, "Completion event for bogus CQ 0x%08x\n", cqn);
		return;
	}

	++cq->arm_sn;
	cq->comp(cq);
}
EXPORT_SYMBOL_GPL(hns_roce_cq_completion);

void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *cq;

	cq = radix_tree_lookup(&cq_table->tree,
			       cqn & (hr_dev->caps.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);

	if (!cq) {
		dev_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->event(cq, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}
EXPORT_SYMBOL_GPL(hns_roce_cq_event);
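
/*
 * The hns_roce_bitmap_init() arguments are (bitmap, num, mask,
 * reserved_bot, reserved_top): all num_cqs CQNs are managed, with the
 * bottom reserved_cqs entries held back from allocation.
 */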

int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);

	return hns_roce_bitmap_init(&cq_table->bitmap, hr_dev->caps.num_cqs,
				    hr_dev->caps.num_cqs - 1,
				    hr_dev->caps.reserved_cqs, 0);
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->cq_table.bitmap);
}