/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
46 * hns_get_gid_index - Get gid index.
47 * @hr_dev: pointer to structure hns_roce_dev.
48 * @port: port, value range: 0 ~ MAX
49 * @gid_index: gid_index, value range: 0 ~ MAX
51 * N ports shared gids, allocation method as follow:
52 * GID[0][0], GID[1][0],.....GID[N - 1][0],
53 * GID[0][0], GID[1][0],.....GID[N - 1][0],
56 int hns_get_gid_index(struct hns_roce_dev
*hr_dev
, u8 port
, int gid_index
)
58 return gid_index
* hr_dev
->caps
.num_ports
+ port
;
60 EXPORT_SYMBOL_GPL(hns_get_gid_index
);
62 static int hns_roce_set_mac(struct hns_roce_dev
*hr_dev
, u8 port
, u8
*addr
)
67 if (!memcmp(hr_dev
->dev_addr
[port
], addr
, MAC_ADDR_OCTET_NUM
))
70 for (i
= 0; i
< MAC_ADDR_OCTET_NUM
; i
++)
71 hr_dev
->dev_addr
[port
][i
] = addr
[i
];
73 phy_port
= hr_dev
->iboe
.phy_port
[port
];
74 return hr_dev
->hw
->set_mac(hr_dev
, phy_port
, addr
);
77 static int hns_roce_add_gid(struct ib_device
*device
, u8 port_num
,
78 unsigned int index
, const union ib_gid
*gid
,
79 const struct ib_gid_attr
*attr
, void **context
)
81 struct hns_roce_dev
*hr_dev
= to_hr_dev(device
);
82 u8 port
= port_num
- 1;
86 if (port
>= hr_dev
->caps
.num_ports
)
89 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
91 ret
= hr_dev
->hw
->set_gid(hr_dev
, port
, index
, (union ib_gid
*)gid
,
94 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
99 static int hns_roce_del_gid(struct ib_device
*device
, u8 port_num
,
100 unsigned int index
, void **context
)
102 struct hns_roce_dev
*hr_dev
= to_hr_dev(device
);
103 union ib_gid zgid
= { {0} };
104 u8 port
= port_num
- 1;
108 if (port
>= hr_dev
->caps
.num_ports
)
111 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
113 ret
= hr_dev
->hw
->set_gid(hr_dev
, port
, index
, &zgid
, NULL
);
115 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
120 static int handle_en_event(struct hns_roce_dev
*hr_dev
, u8 port
,
123 struct device
*dev
= hr_dev
->dev
;
124 struct net_device
*netdev
;
127 netdev
= hr_dev
->iboe
.netdevs
[port
];
129 dev_err(dev
, "port(%d) can't find netdev\n", port
);
136 case NETDEV_REGISTER
:
137 case NETDEV_CHANGEADDR
:
138 ret
= hns_roce_set_mac(hr_dev
, port
, netdev
->dev_addr
);
142 * In v1 engine, only support all ports closed together.
146 dev_dbg(dev
, "NETDEV event = 0x%x!\n", (u32
)(event
));
153 static int hns_roce_netdev_event(struct notifier_block
*self
,
154 unsigned long event
, void *ptr
)
156 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
157 struct hns_roce_ib_iboe
*iboe
= NULL
;
158 struct hns_roce_dev
*hr_dev
= NULL
;
162 hr_dev
= container_of(self
, struct hns_roce_dev
, iboe
.nb
);
163 iboe
= &hr_dev
->iboe
;
165 for (port
= 0; port
< hr_dev
->caps
.num_ports
; port
++) {
166 if (dev
== iboe
->netdevs
[port
]) {
167 ret
= handle_en_event(hr_dev
, port
, event
);
177 static int hns_roce_setup_mtu_mac(struct hns_roce_dev
*hr_dev
)
182 for (i
= 0; i
< hr_dev
->caps
.num_ports
; i
++) {
183 if (hr_dev
->hw
->set_mtu
)
184 hr_dev
->hw
->set_mtu(hr_dev
, hr_dev
->iboe
.phy_port
[i
],
185 hr_dev
->caps
.max_mtu
);
186 ret
= hns_roce_set_mac(hr_dev
, i
,
187 hr_dev
->iboe
.netdevs
[i
]->dev_addr
);
195 static int hns_roce_query_device(struct ib_device
*ib_dev
,
196 struct ib_device_attr
*props
,
197 struct ib_udata
*uhw
)
199 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
201 memset(props
, 0, sizeof(*props
));
203 props
->sys_image_guid
= cpu_to_be32(hr_dev
->sys_image_guid
);
204 props
->max_mr_size
= (u64
)(~(0ULL));
205 props
->page_size_cap
= hr_dev
->caps
.page_size_cap
;
206 props
->vendor_id
= hr_dev
->vendor_id
;
207 props
->vendor_part_id
= hr_dev
->vendor_part_id
;
208 props
->hw_ver
= hr_dev
->hw_rev
;
209 props
->max_qp
= hr_dev
->caps
.num_qps
;
210 props
->max_qp_wr
= hr_dev
->caps
.max_wqes
;
211 props
->device_cap_flags
= IB_DEVICE_PORT_ACTIVE_EVENT
|
212 IB_DEVICE_RC_RNR_NAK_GEN
;
213 props
->max_sge
= max(hr_dev
->caps
.max_sq_sg
, hr_dev
->caps
.max_rq_sg
);
214 props
->max_sge_rd
= 1;
215 props
->max_cq
= hr_dev
->caps
.num_cqs
;
216 props
->max_cqe
= hr_dev
->caps
.max_cqes
;
217 props
->max_mr
= hr_dev
->caps
.num_mtpts
;
218 props
->max_pd
= hr_dev
->caps
.num_pds
;
219 props
->max_qp_rd_atom
= hr_dev
->caps
.max_qp_dest_rdma
;
220 props
->max_qp_init_rd_atom
= hr_dev
->caps
.max_qp_init_rdma
;
221 props
->atomic_cap
= IB_ATOMIC_NONE
;
222 props
->max_pkeys
= 1;
223 props
->local_ca_ack_delay
= hr_dev
->caps
.local_ca_ack_delay
;
228 static struct net_device
*hns_roce_get_netdev(struct ib_device
*ib_dev
,
231 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
232 struct net_device
*ndev
;
234 if (port_num
< 1 || port_num
> hr_dev
->caps
.num_ports
)
239 ndev
= hr_dev
->iboe
.netdevs
[port_num
- 1];
247 static int hns_roce_query_port(struct ib_device
*ib_dev
, u8 port_num
,
248 struct ib_port_attr
*props
)
250 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
251 struct device
*dev
= hr_dev
->dev
;
252 struct net_device
*net_dev
;
257 assert(port_num
> 0);
260 /* props being zeroed by the caller, avoid zeroing it here */
262 props
->max_mtu
= hr_dev
->caps
.max_mtu
;
263 props
->gid_tbl_len
= hr_dev
->caps
.gid_table_len
[port
];
264 props
->port_cap_flags
= IB_PORT_CM_SUP
| IB_PORT_REINIT_SUP
|
265 IB_PORT_VENDOR_CLASS_SUP
|
266 IB_PORT_BOOT_MGMT_SUP
;
267 props
->max_msg_sz
= HNS_ROCE_MAX_MSG_LEN
;
268 props
->pkey_tbl_len
= 1;
269 props
->active_width
= IB_WIDTH_4X
;
270 props
->active_speed
= 1;
272 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
274 net_dev
= hr_dev
->iboe
.netdevs
[port
];
276 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
277 dev_err(dev
, "find netdev %d failed!\r\n", port
);
281 mtu
= iboe_get_mtu(net_dev
->mtu
);
282 props
->active_mtu
= mtu
? min(props
->max_mtu
, mtu
) : IB_MTU_256
;
283 props
->state
= (netif_running(net_dev
) && netif_carrier_ok(net_dev
)) ?
284 IB_PORT_ACTIVE
: IB_PORT_DOWN
;
285 props
->phys_state
= (props
->state
== IB_PORT_ACTIVE
) ? 5 : 3;
287 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
292 static enum rdma_link_layer
hns_roce_get_link_layer(struct ib_device
*device
,
295 return IB_LINK_LAYER_ETHERNET
;
298 static int hns_roce_query_gid(struct ib_device
*ib_dev
, u8 port_num
, int index
,
304 static int hns_roce_query_pkey(struct ib_device
*ib_dev
, u8 port
, u16 index
,
312 static int hns_roce_modify_device(struct ib_device
*ib_dev
, int mask
,
313 struct ib_device_modify
*props
)
317 if (mask
& ~IB_DEVICE_MODIFY_NODE_DESC
)
320 if (mask
& IB_DEVICE_MODIFY_NODE_DESC
) {
321 spin_lock_irqsave(&to_hr_dev(ib_dev
)->sm_lock
, flags
);
322 memcpy(ib_dev
->node_desc
, props
->node_desc
, NODE_DESC_SIZE
);
323 spin_unlock_irqrestore(&to_hr_dev(ib_dev
)->sm_lock
, flags
);
329 static int hns_roce_modify_port(struct ib_device
*ib_dev
, u8 port_num
, int mask
,
330 struct ib_port_modify
*props
)
335 static struct ib_ucontext
*hns_roce_alloc_ucontext(struct ib_device
*ib_dev
,
336 struct ib_udata
*udata
)
339 struct hns_roce_ucontext
*context
;
340 struct hns_roce_ib_alloc_ucontext_resp resp
;
341 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
343 resp
.qp_tab_size
= hr_dev
->caps
.num_qps
;
345 context
= kmalloc(sizeof(*context
), GFP_KERNEL
);
347 return ERR_PTR(-ENOMEM
);
349 ret
= hns_roce_uar_alloc(hr_dev
, &context
->uar
);
351 goto error_fail_uar_alloc
;
353 if (hr_dev
->caps
.flags
& HNS_ROCE_CAP_FLAG_RECORD_DB
) {
354 INIT_LIST_HEAD(&context
->page_list
);
355 mutex_init(&context
->page_mutex
);
358 ret
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
360 goto error_fail_copy_to_udata
;
362 return &context
->ibucontext
;
364 error_fail_copy_to_udata
:
365 hns_roce_uar_free(hr_dev
, &context
->uar
);
367 error_fail_uar_alloc
:
373 static int hns_roce_dealloc_ucontext(struct ib_ucontext
*ibcontext
)
375 struct hns_roce_ucontext
*context
= to_hr_ucontext(ibcontext
);
377 hns_roce_uar_free(to_hr_dev(ibcontext
->device
), &context
->uar
);
383 static int hns_roce_mmap(struct ib_ucontext
*context
,
384 struct vm_area_struct
*vma
)
386 struct hns_roce_dev
*hr_dev
= to_hr_dev(context
->device
);
388 if (((vma
->vm_end
- vma
->vm_start
) % PAGE_SIZE
) != 0)
391 if (vma
->vm_pgoff
== 0) {
392 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
393 if (io_remap_pfn_range(vma
, vma
->vm_start
,
394 to_hr_ucontext(context
)->uar
.pfn
,
395 PAGE_SIZE
, vma
->vm_page_prot
))
397 } else if (vma
->vm_pgoff
== 1 && hr_dev
->tptr_dma_addr
&&
399 /* vm_pgoff: 1 -- TPTR */
400 if (io_remap_pfn_range(vma
, vma
->vm_start
,
401 hr_dev
->tptr_dma_addr
>> PAGE_SHIFT
,
411 static int hns_roce_port_immutable(struct ib_device
*ib_dev
, u8 port_num
,
412 struct ib_port_immutable
*immutable
)
414 struct ib_port_attr attr
;
417 ret
= ib_query_port(ib_dev
, port_num
, &attr
);
421 immutable
->pkey_tbl_len
= attr
.pkey_tbl_len
;
422 immutable
->gid_tbl_len
= attr
.gid_tbl_len
;
424 immutable
->max_mad_size
= IB_MGMT_MAD_SIZE
;
425 immutable
->core_cap_flags
= RDMA_CORE_PORT_IBA_ROCE
;
426 if (to_hr_dev(ib_dev
)->caps
.flags
& HNS_ROCE_CAP_FLAG_ROCE_V1_V2
)
427 immutable
->core_cap_flags
|= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP
;
432 static void hns_roce_unregister_device(struct hns_roce_dev
*hr_dev
)
434 struct hns_roce_ib_iboe
*iboe
= &hr_dev
->iboe
;
436 unregister_netdevice_notifier(&iboe
->nb
);
437 ib_unregister_device(&hr_dev
->ib_dev
);
440 static int hns_roce_register_device(struct hns_roce_dev
*hr_dev
)
443 struct hns_roce_ib_iboe
*iboe
= NULL
;
444 struct ib_device
*ib_dev
= NULL
;
445 struct device
*dev
= hr_dev
->dev
;
447 iboe
= &hr_dev
->iboe
;
448 spin_lock_init(&iboe
->lock
);
450 ib_dev
= &hr_dev
->ib_dev
;
451 strlcpy(ib_dev
->name
, "hns_%d", IB_DEVICE_NAME_MAX
);
453 ib_dev
->owner
= THIS_MODULE
;
454 ib_dev
->node_type
= RDMA_NODE_IB_CA
;
455 ib_dev
->dev
.parent
= dev
;
457 ib_dev
->phys_port_cnt
= hr_dev
->caps
.num_ports
;
458 ib_dev
->local_dma_lkey
= hr_dev
->caps
.reserved_lkey
;
459 ib_dev
->num_comp_vectors
= hr_dev
->caps
.num_comp_vectors
;
460 ib_dev
->uverbs_abi_ver
= 1;
461 ib_dev
->uverbs_cmd_mask
=
462 (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT
) |
463 (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE
) |
464 (1ULL << IB_USER_VERBS_CMD_QUERY_PORT
) |
465 (1ULL << IB_USER_VERBS_CMD_ALLOC_PD
) |
466 (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD
) |
467 (1ULL << IB_USER_VERBS_CMD_REG_MR
) |
468 (1ULL << IB_USER_VERBS_CMD_DEREG_MR
) |
469 (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL
) |
470 (1ULL << IB_USER_VERBS_CMD_CREATE_CQ
) |
471 (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ
) |
472 (1ULL << IB_USER_VERBS_CMD_CREATE_QP
) |
473 (1ULL << IB_USER_VERBS_CMD_MODIFY_QP
) |
474 (1ULL << IB_USER_VERBS_CMD_QUERY_QP
) |
475 (1ULL << IB_USER_VERBS_CMD_DESTROY_QP
);
477 /* HCA||device||port */
478 ib_dev
->modify_device
= hns_roce_modify_device
;
479 ib_dev
->query_device
= hns_roce_query_device
;
480 ib_dev
->query_port
= hns_roce_query_port
;
481 ib_dev
->modify_port
= hns_roce_modify_port
;
482 ib_dev
->get_link_layer
= hns_roce_get_link_layer
;
483 ib_dev
->get_netdev
= hns_roce_get_netdev
;
484 ib_dev
->query_gid
= hns_roce_query_gid
;
485 ib_dev
->add_gid
= hns_roce_add_gid
;
486 ib_dev
->del_gid
= hns_roce_del_gid
;
487 ib_dev
->query_pkey
= hns_roce_query_pkey
;
488 ib_dev
->alloc_ucontext
= hns_roce_alloc_ucontext
;
489 ib_dev
->dealloc_ucontext
= hns_roce_dealloc_ucontext
;
490 ib_dev
->mmap
= hns_roce_mmap
;
493 ib_dev
->alloc_pd
= hns_roce_alloc_pd
;
494 ib_dev
->dealloc_pd
= hns_roce_dealloc_pd
;
497 ib_dev
->create_ah
= hns_roce_create_ah
;
498 ib_dev
->query_ah
= hns_roce_query_ah
;
499 ib_dev
->destroy_ah
= hns_roce_destroy_ah
;
502 ib_dev
->create_qp
= hns_roce_create_qp
;
503 ib_dev
->modify_qp
= hns_roce_modify_qp
;
504 ib_dev
->query_qp
= hr_dev
->hw
->query_qp
;
505 ib_dev
->destroy_qp
= hr_dev
->hw
->destroy_qp
;
506 ib_dev
->post_send
= hr_dev
->hw
->post_send
;
507 ib_dev
->post_recv
= hr_dev
->hw
->post_recv
;
510 ib_dev
->create_cq
= hns_roce_ib_create_cq
;
511 ib_dev
->modify_cq
= hr_dev
->hw
->modify_cq
;
512 ib_dev
->destroy_cq
= hns_roce_ib_destroy_cq
;
513 ib_dev
->req_notify_cq
= hr_dev
->hw
->req_notify_cq
;
514 ib_dev
->poll_cq
= hr_dev
->hw
->poll_cq
;
517 ib_dev
->get_dma_mr
= hns_roce_get_dma_mr
;
518 ib_dev
->reg_user_mr
= hns_roce_reg_user_mr
;
519 ib_dev
->dereg_mr
= hns_roce_dereg_mr
;
520 if (hr_dev
->caps
.flags
& HNS_ROCE_CAP_FLAG_REREG_MR
) {
521 ib_dev
->rereg_user_mr
= hns_roce_rereg_user_mr
;
522 ib_dev
->uverbs_cmd_mask
|= (1ULL << IB_USER_VERBS_CMD_REREG_MR
);
526 ib_dev
->get_port_immutable
= hns_roce_port_immutable
;
528 ret
= ib_register_device(ib_dev
, NULL
);
530 dev_err(dev
, "ib_register_device failed!\n");
534 ret
= hns_roce_setup_mtu_mac(hr_dev
);
536 dev_err(dev
, "setup_mtu_mac failed!\n");
537 goto error_failed_setup_mtu_mac
;
540 iboe
->nb
.notifier_call
= hns_roce_netdev_event
;
541 ret
= register_netdevice_notifier(&iboe
->nb
);
543 dev_err(dev
, "register_netdevice_notifier failed!\n");
544 goto error_failed_setup_mtu_mac
;
549 error_failed_setup_mtu_mac
:
550 ib_unregister_device(ib_dev
);
555 static int hns_roce_init_hem(struct hns_roce_dev
*hr_dev
)
558 struct device
*dev
= hr_dev
->dev
;
560 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->mr_table
.mtt_table
,
561 HEM_TYPE_MTT
, hr_dev
->caps
.mtt_entry_sz
,
562 hr_dev
->caps
.num_mtt_segs
, 1);
564 dev_err(dev
, "Failed to init MTT context memory, aborting.\n");
568 if (hns_roce_check_whether_mhop(hr_dev
, HEM_TYPE_CQE
)) {
569 ret
= hns_roce_init_hem_table(hr_dev
,
570 &hr_dev
->mr_table
.mtt_cqe_table
,
571 HEM_TYPE_CQE
, hr_dev
->caps
.mtt_entry_sz
,
572 hr_dev
->caps
.num_cqe_segs
, 1);
574 dev_err(dev
, "Failed to init MTT CQE context memory, aborting.\n");
579 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->mr_table
.mtpt_table
,
580 HEM_TYPE_MTPT
, hr_dev
->caps
.mtpt_entry_sz
,
581 hr_dev
->caps
.num_mtpts
, 1);
583 dev_err(dev
, "Failed to init MTPT context memory, aborting.\n");
587 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->qp_table
.qp_table
,
588 HEM_TYPE_QPC
, hr_dev
->caps
.qpc_entry_sz
,
589 hr_dev
->caps
.num_qps
, 1);
591 dev_err(dev
, "Failed to init QP context memory, aborting.\n");
595 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->qp_table
.irrl_table
,
597 hr_dev
->caps
.irrl_entry_sz
*
598 hr_dev
->caps
.max_qp_init_rdma
,
599 hr_dev
->caps
.num_qps
, 1);
601 dev_err(dev
, "Failed to init irrl_table memory, aborting.\n");
605 if (hr_dev
->caps
.trrl_entry_sz
) {
606 ret
= hns_roce_init_hem_table(hr_dev
,
607 &hr_dev
->qp_table
.trrl_table
,
609 hr_dev
->caps
.trrl_entry_sz
*
610 hr_dev
->caps
.max_qp_dest_rdma
,
611 hr_dev
->caps
.num_qps
, 1);
614 "Failed to init trrl_table memory, aborting.\n");
619 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->cq_table
.table
,
620 HEM_TYPE_CQC
, hr_dev
->caps
.cqc_entry_sz
,
621 hr_dev
->caps
.num_cqs
, 1);
623 dev_err(dev
, "Failed to init CQ context memory, aborting.\n");
630 if (hr_dev
->caps
.trrl_entry_sz
)
631 hns_roce_cleanup_hem_table(hr_dev
,
632 &hr_dev
->qp_table
.trrl_table
);
635 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->qp_table
.irrl_table
);
638 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->qp_table
.qp_table
);
641 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->mr_table
.mtpt_table
);
644 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->mr_table
.mtt_table
);
645 if (hns_roce_check_whether_mhop(hr_dev
, HEM_TYPE_CQE
))
646 hns_roce_cleanup_hem_table(hr_dev
,
647 &hr_dev
->mr_table
.mtt_cqe_table
);
650 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->mr_table
.mtt_table
);
656 * hns_roce_setup_hca - setup host channel adapter
657 * @hr_dev: pointer to hns roce device
660 static int hns_roce_setup_hca(struct hns_roce_dev
*hr_dev
)
663 struct device
*dev
= hr_dev
->dev
;
665 spin_lock_init(&hr_dev
->sm_lock
);
666 spin_lock_init(&hr_dev
->bt_cmd_lock
);
668 ret
= hns_roce_init_uar_table(hr_dev
);
670 dev_err(dev
, "Failed to initialize uar table. aborting\n");
674 ret
= hns_roce_uar_alloc(hr_dev
, &hr_dev
->priv_uar
);
676 dev_err(dev
, "Failed to allocate priv_uar.\n");
677 goto err_uar_table_free
;
680 ret
= hns_roce_init_pd_table(hr_dev
);
682 dev_err(dev
, "Failed to init protected domain table.\n");
683 goto err_uar_alloc_free
;
686 ret
= hns_roce_init_mr_table(hr_dev
);
688 dev_err(dev
, "Failed to init memory region table.\n");
689 goto err_pd_table_free
;
692 ret
= hns_roce_init_cq_table(hr_dev
);
694 dev_err(dev
, "Failed to init completion queue table.\n");
695 goto err_mr_table_free
;
698 ret
= hns_roce_init_qp_table(hr_dev
);
700 dev_err(dev
, "Failed to init queue pair table.\n");
701 goto err_cq_table_free
;
707 hns_roce_cleanup_cq_table(hr_dev
);
710 hns_roce_cleanup_mr_table(hr_dev
);
713 hns_roce_cleanup_pd_table(hr_dev
);
716 hns_roce_uar_free(hr_dev
, &hr_dev
->priv_uar
);
719 hns_roce_cleanup_uar_table(hr_dev
);
723 int hns_roce_init(struct hns_roce_dev
*hr_dev
)
726 struct device
*dev
= hr_dev
->dev
;
728 if (hr_dev
->hw
->reset
) {
729 ret
= hr_dev
->hw
->reset(hr_dev
, true);
731 dev_err(dev
, "Reset RoCE engine failed!\n");
736 if (hr_dev
->hw
->cmq_init
) {
737 ret
= hr_dev
->hw
->cmq_init(hr_dev
);
739 dev_err(dev
, "Init RoCE Command Queue failed!\n");
740 goto error_failed_cmq_init
;
744 ret
= hr_dev
->hw
->hw_profile(hr_dev
);
746 dev_err(dev
, "Get RoCE engine profile failed!\n");
747 goto error_failed_cmd_init
;
750 ret
= hns_roce_cmd_init(hr_dev
);
752 dev_err(dev
, "cmd init failed!\n");
753 goto error_failed_cmd_init
;
756 ret
= hr_dev
->hw
->init_eq(hr_dev
);
758 dev_err(dev
, "eq init failed!\n");
759 goto error_failed_eq_table
;
762 if (hr_dev
->cmd_mod
) {
763 ret
= hns_roce_cmd_use_events(hr_dev
);
765 dev_err(dev
, "Switch to event-driven cmd failed!\n");
766 goto error_failed_use_event
;
770 ret
= hns_roce_init_hem(hr_dev
);
772 dev_err(dev
, "init HEM(Hardware Entry Memory) failed!\n");
773 goto error_failed_init_hem
;
776 ret
= hns_roce_setup_hca(hr_dev
);
778 dev_err(dev
, "setup hca failed!\n");
779 goto error_failed_setup_hca
;
782 if (hr_dev
->hw
->hw_init
) {
783 ret
= hr_dev
->hw
->hw_init(hr_dev
);
785 dev_err(dev
, "hw_init failed!\n");
786 goto error_failed_engine_init
;
790 ret
= hns_roce_register_device(hr_dev
);
792 goto error_failed_register_device
;
796 error_failed_register_device
:
797 if (hr_dev
->hw
->hw_exit
)
798 hr_dev
->hw
->hw_exit(hr_dev
);
800 error_failed_engine_init
:
801 hns_roce_cleanup_bitmap(hr_dev
);
803 error_failed_setup_hca
:
804 hns_roce_cleanup_hem(hr_dev
);
806 error_failed_init_hem
:
808 hns_roce_cmd_use_polling(hr_dev
);
810 error_failed_use_event
:
811 hr_dev
->hw
->cleanup_eq(hr_dev
);
813 error_failed_eq_table
:
814 hns_roce_cmd_cleanup(hr_dev
);
816 error_failed_cmd_init
:
817 if (hr_dev
->hw
->cmq_exit
)
818 hr_dev
->hw
->cmq_exit(hr_dev
);
820 error_failed_cmq_init
:
821 if (hr_dev
->hw
->reset
) {
822 ret
= hr_dev
->hw
->reset(hr_dev
, false);
824 dev_err(dev
, "Dereset RoCE engine failed!\n");
829 EXPORT_SYMBOL_GPL(hns_roce_init
);
831 void hns_roce_exit(struct hns_roce_dev
*hr_dev
)
833 hns_roce_unregister_device(hr_dev
);
834 if (hr_dev
->hw
->hw_exit
)
835 hr_dev
->hw
->hw_exit(hr_dev
);
836 hns_roce_cleanup_bitmap(hr_dev
);
837 hns_roce_cleanup_hem(hr_dev
);
840 hns_roce_cmd_use_polling(hr_dev
);
842 hr_dev
->hw
->cleanup_eq(hr_dev
);
843 hns_roce_cmd_cleanup(hr_dev
);
844 if (hr_dev
->hw
->cmq_exit
)
845 hr_dev
->hw
->cmq_exit(hr_dev
);
846 if (hr_dev
->hw
->reset
)
847 hr_dev
->hw
->reset(hr_dev
, false);
849 EXPORT_SYMBOL_GPL(hns_roce_exit
);
851 MODULE_LICENSE("Dual BSD/GPL");
852 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
853 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
854 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
855 MODULE_DESCRIPTION("HNS RoCE Driver");