2 * Copyright (c) 2016 Hisilicon Limited.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/acpi.h>
34 #include <linux/of_platform.h>
35 #include <linux/module.h>
36 #include <rdma/ib_addr.h>
37 #include <rdma/ib_smi.h>
38 #include <rdma/ib_user_verbs.h>
39 #include <rdma/ib_cache.h>
40 #include "hns_roce_common.h"
41 #include "hns_roce_device.h"
42 #include <rdma/hns-abi.h>
43 #include "hns_roce_hem.h"
46 * hns_get_gid_index - Get gid index.
47 * @hr_dev: pointer to structure hns_roce_dev.
48 * @port: port, value range: 0 ~ MAX
49 * @gid_index: gid_index, value range: 0 ~ MAX
51 * N ports shared gids, allocation method as follow:
52 * GID[0][0], GID[1][0],.....GID[N - 1][0],
53 * GID[0][1], GID[1][1],.....GID[N - 1][1], and so on.
56 int hns_get_gid_index(struct hns_roce_dev
*hr_dev
, u8 port
, int gid_index
)
58 return gid_index
* hr_dev
->caps
.num_ports
+ port
;
61 static void hns_roce_set_mac(struct hns_roce_dev
*hr_dev
, u8 port
, u8
*addr
)
66 if (!memcmp(hr_dev
->dev_addr
[port
], addr
, MAC_ADDR_OCTET_NUM
))
69 for (i
= 0; i
< MAC_ADDR_OCTET_NUM
; i
++)
70 hr_dev
->dev_addr
[port
][i
] = addr
[i
];
72 phy_port
= hr_dev
->iboe
.phy_port
[port
];
73 hr_dev
->hw
->set_mac(hr_dev
, phy_port
, addr
);
76 static int hns_roce_add_gid(struct ib_device
*device
, u8 port_num
,
77 unsigned int index
, const union ib_gid
*gid
,
78 const struct ib_gid_attr
*attr
, void **context
)
80 struct hns_roce_dev
*hr_dev
= to_hr_dev(device
);
81 u8 port
= port_num
- 1;
84 if (port
>= hr_dev
->caps
.num_ports
)
87 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
89 hr_dev
->hw
->set_gid(hr_dev
, port
, index
, (union ib_gid
*)gid
);
91 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
96 static int hns_roce_del_gid(struct ib_device
*device
, u8 port_num
,
97 unsigned int index
, void **context
)
99 struct hns_roce_dev
*hr_dev
= to_hr_dev(device
);
100 union ib_gid zgid
= { {0} };
101 u8 port
= port_num
- 1;
104 if (port
>= hr_dev
->caps
.num_ports
)
107 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
109 hr_dev
->hw
->set_gid(hr_dev
, port
, index
, &zgid
);
111 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
116 static int handle_en_event(struct hns_roce_dev
*hr_dev
, u8 port
,
119 struct device
*dev
= &hr_dev
->pdev
->dev
;
120 struct net_device
*netdev
;
122 netdev
= hr_dev
->iboe
.netdevs
[port
];
124 dev_err(dev
, "port(%d) can't find netdev\n", port
);
128 spin_lock_bh(&hr_dev
->iboe
.lock
);
133 case NETDEV_REGISTER
:
134 case NETDEV_CHANGEADDR
:
135 hns_roce_set_mac(hr_dev
, port
, netdev
->dev_addr
);
139 * In v1 engine, only support all ports closed together.
143 dev_dbg(dev
, "NETDEV event = 0x%x!\n", (u32
)(event
));
147 spin_unlock_bh(&hr_dev
->iboe
.lock
);
151 static int hns_roce_netdev_event(struct notifier_block
*self
,
152 unsigned long event
, void *ptr
)
154 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
155 struct hns_roce_ib_iboe
*iboe
= NULL
;
156 struct hns_roce_dev
*hr_dev
= NULL
;
160 hr_dev
= container_of(self
, struct hns_roce_dev
, iboe
.nb
);
161 iboe
= &hr_dev
->iboe
;
163 for (port
= 0; port
< hr_dev
->caps
.num_ports
; port
++) {
164 if (dev
== iboe
->netdevs
[port
]) {
165 ret
= handle_en_event(hr_dev
, port
, event
);
175 static int hns_roce_setup_mtu_mac(struct hns_roce_dev
*hr_dev
)
179 for (i
= 0; i
< hr_dev
->caps
.num_ports
; i
++) {
180 hr_dev
->hw
->set_mtu(hr_dev
, hr_dev
->iboe
.phy_port
[i
],
181 hr_dev
->caps
.max_mtu
);
182 hns_roce_set_mac(hr_dev
, i
, hr_dev
->iboe
.netdevs
[i
]->dev_addr
);
188 static int hns_roce_query_device(struct ib_device
*ib_dev
,
189 struct ib_device_attr
*props
,
190 struct ib_udata
*uhw
)
192 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
194 memset(props
, 0, sizeof(*props
));
196 props
->sys_image_guid
= hr_dev
->sys_image_guid
;
197 props
->max_mr_size
= (u64
)(~(0ULL));
198 props
->page_size_cap
= hr_dev
->caps
.page_size_cap
;
199 props
->vendor_id
= hr_dev
->vendor_id
;
200 props
->vendor_part_id
= hr_dev
->vendor_part_id
;
201 props
->hw_ver
= hr_dev
->hw_rev
;
202 props
->max_qp
= hr_dev
->caps
.num_qps
;
203 props
->max_qp_wr
= hr_dev
->caps
.max_wqes
;
204 props
->device_cap_flags
= IB_DEVICE_PORT_ACTIVE_EVENT
|
205 IB_DEVICE_RC_RNR_NAK_GEN
;
206 props
->max_sge
= hr_dev
->caps
.max_sq_sg
;
207 props
->max_sge_rd
= 1;
208 props
->max_cq
= hr_dev
->caps
.num_cqs
;
209 props
->max_cqe
= hr_dev
->caps
.max_cqes
;
210 props
->max_mr
= hr_dev
->caps
.num_mtpts
;
211 props
->max_pd
= hr_dev
->caps
.num_pds
;
212 props
->max_qp_rd_atom
= hr_dev
->caps
.max_qp_dest_rdma
;
213 props
->max_qp_init_rd_atom
= hr_dev
->caps
.max_qp_init_rdma
;
214 props
->atomic_cap
= IB_ATOMIC_NONE
;
215 props
->max_pkeys
= 1;
216 props
->local_ca_ack_delay
= hr_dev
->caps
.local_ca_ack_delay
;
221 static struct net_device
*hns_roce_get_netdev(struct ib_device
*ib_dev
,
224 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
225 struct net_device
*ndev
;
227 if (port_num
< 1 || port_num
> hr_dev
->caps
.num_ports
)
232 ndev
= hr_dev
->iboe
.netdevs
[port_num
- 1];
240 static int hns_roce_query_port(struct ib_device
*ib_dev
, u8 port_num
,
241 struct ib_port_attr
*props
)
243 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
244 struct device
*dev
= &hr_dev
->pdev
->dev
;
245 struct net_device
*net_dev
;
250 assert(port_num
> 0);
253 /* props being zeroed by the caller, avoid zeroing it here */
255 props
->max_mtu
= hr_dev
->caps
.max_mtu
;
256 props
->gid_tbl_len
= hr_dev
->caps
.gid_table_len
[port
];
257 props
->port_cap_flags
= IB_PORT_CM_SUP
| IB_PORT_REINIT_SUP
|
258 IB_PORT_VENDOR_CLASS_SUP
|
259 IB_PORT_BOOT_MGMT_SUP
;
260 props
->max_msg_sz
= HNS_ROCE_MAX_MSG_LEN
;
261 props
->pkey_tbl_len
= 1;
262 props
->active_width
= IB_WIDTH_4X
;
263 props
->active_speed
= 1;
265 spin_lock_irqsave(&hr_dev
->iboe
.lock
, flags
);
267 net_dev
= hr_dev
->iboe
.netdevs
[port
];
269 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
270 dev_err(dev
, "find netdev %d failed!\r\n", port
);
274 mtu
= iboe_get_mtu(net_dev
->mtu
);
275 props
->active_mtu
= mtu
? min(props
->max_mtu
, mtu
) : IB_MTU_256
;
276 props
->state
= (netif_running(net_dev
) && netif_carrier_ok(net_dev
)) ?
277 IB_PORT_ACTIVE
: IB_PORT_DOWN
;
278 props
->phys_state
= (props
->state
== IB_PORT_ACTIVE
) ? 5 : 3;
280 spin_unlock_irqrestore(&hr_dev
->iboe
.lock
, flags
);
285 static enum rdma_link_layer
hns_roce_get_link_layer(struct ib_device
*device
,
288 return IB_LINK_LAYER_ETHERNET
;
291 static int hns_roce_query_gid(struct ib_device
*ib_dev
, u8 port_num
, int index
,
297 static int hns_roce_query_pkey(struct ib_device
*ib_dev
, u8 port
, u16 index
,
305 static int hns_roce_modify_device(struct ib_device
*ib_dev
, int mask
,
306 struct ib_device_modify
*props
)
310 if (mask
& ~IB_DEVICE_MODIFY_NODE_DESC
)
313 if (mask
& IB_DEVICE_MODIFY_NODE_DESC
) {
314 spin_lock_irqsave(&to_hr_dev(ib_dev
)->sm_lock
, flags
);
315 memcpy(ib_dev
->node_desc
, props
->node_desc
, NODE_DESC_SIZE
);
316 spin_unlock_irqrestore(&to_hr_dev(ib_dev
)->sm_lock
, flags
);
322 static int hns_roce_modify_port(struct ib_device
*ib_dev
, u8 port_num
, int mask
,
323 struct ib_port_modify
*props
)
328 static struct ib_ucontext
*hns_roce_alloc_ucontext(struct ib_device
*ib_dev
,
329 struct ib_udata
*udata
)
332 struct hns_roce_ucontext
*context
;
333 struct hns_roce_ib_alloc_ucontext_resp resp
;
334 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_dev
);
336 resp
.qp_tab_size
= hr_dev
->caps
.num_qps
;
338 context
= kmalloc(sizeof(*context
), GFP_KERNEL
);
340 return ERR_PTR(-ENOMEM
);
342 ret
= hns_roce_uar_alloc(hr_dev
, &context
->uar
);
344 goto error_fail_uar_alloc
;
346 ret
= ib_copy_to_udata(udata
, &resp
, sizeof(resp
));
348 goto error_fail_copy_to_udata
;
350 return &context
->ibucontext
;
352 error_fail_copy_to_udata
:
353 hns_roce_uar_free(hr_dev
, &context
->uar
);
355 error_fail_uar_alloc
:
361 static int hns_roce_dealloc_ucontext(struct ib_ucontext
*ibcontext
)
363 struct hns_roce_ucontext
*context
= to_hr_ucontext(ibcontext
);
365 hns_roce_uar_free(to_hr_dev(ibcontext
->device
), &context
->uar
);
371 static int hns_roce_mmap(struct ib_ucontext
*context
,
372 struct vm_area_struct
*vma
)
374 struct hns_roce_dev
*hr_dev
= to_hr_dev(context
->device
);
376 if (((vma
->vm_end
- vma
->vm_start
) % PAGE_SIZE
) != 0)
379 if (vma
->vm_pgoff
== 0) {
380 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
381 if (io_remap_pfn_range(vma
, vma
->vm_start
,
382 to_hr_ucontext(context
)->uar
.pfn
,
383 PAGE_SIZE
, vma
->vm_page_prot
))
385 } else if (vma
->vm_pgoff
== 1 && hr_dev
->hw_rev
== HNS_ROCE_HW_VER1
) {
386 /* vm_pgoff: 1 -- TPTR */
387 if (io_remap_pfn_range(vma
, vma
->vm_start
,
388 hr_dev
->tptr_dma_addr
>> PAGE_SHIFT
,
398 static int hns_roce_port_immutable(struct ib_device
*ib_dev
, u8 port_num
,
399 struct ib_port_immutable
*immutable
)
401 struct ib_port_attr attr
;
404 immutable
->core_cap_flags
= RDMA_CORE_PORT_IBA_ROCE
;
406 ret
= ib_query_port(ib_dev
, port_num
, &attr
);
410 immutable
->pkey_tbl_len
= attr
.pkey_tbl_len
;
411 immutable
->gid_tbl_len
= attr
.gid_tbl_len
;
413 immutable
->max_mad_size
= IB_MGMT_MAD_SIZE
;
418 static void hns_roce_unregister_device(struct hns_roce_dev
*hr_dev
)
420 struct hns_roce_ib_iboe
*iboe
= &hr_dev
->iboe
;
422 unregister_inetaddr_notifier(&iboe
->nb_inet
);
423 unregister_netdevice_notifier(&iboe
->nb
);
424 ib_unregister_device(&hr_dev
->ib_dev
);
427 static int hns_roce_register_device(struct hns_roce_dev
*hr_dev
)
430 struct hns_roce_ib_iboe
*iboe
= NULL
;
431 struct ib_device
*ib_dev
= NULL
;
432 struct device
*dev
= &hr_dev
->pdev
->dev
;
434 iboe
= &hr_dev
->iboe
;
435 spin_lock_init(&iboe
->lock
);
437 ib_dev
= &hr_dev
->ib_dev
;
438 strlcpy(ib_dev
->name
, "hns_%d", IB_DEVICE_NAME_MAX
);
440 ib_dev
->owner
= THIS_MODULE
;
441 ib_dev
->node_type
= RDMA_NODE_IB_CA
;
442 ib_dev
->dev
.parent
= dev
;
444 ib_dev
->phys_port_cnt
= hr_dev
->caps
.num_ports
;
445 ib_dev
->local_dma_lkey
= hr_dev
->caps
.reserved_lkey
;
446 ib_dev
->num_comp_vectors
= hr_dev
->caps
.num_comp_vectors
;
447 ib_dev
->uverbs_abi_ver
= 1;
448 ib_dev
->uverbs_cmd_mask
=
449 (1ULL << IB_USER_VERBS_CMD_GET_CONTEXT
) |
450 (1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE
) |
451 (1ULL << IB_USER_VERBS_CMD_QUERY_PORT
) |
452 (1ULL << IB_USER_VERBS_CMD_ALLOC_PD
) |
453 (1ULL << IB_USER_VERBS_CMD_DEALLOC_PD
) |
454 (1ULL << IB_USER_VERBS_CMD_REG_MR
) |
455 (1ULL << IB_USER_VERBS_CMD_DEREG_MR
) |
456 (1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL
) |
457 (1ULL << IB_USER_VERBS_CMD_CREATE_CQ
) |
458 (1ULL << IB_USER_VERBS_CMD_DESTROY_CQ
) |
459 (1ULL << IB_USER_VERBS_CMD_CREATE_QP
) |
460 (1ULL << IB_USER_VERBS_CMD_MODIFY_QP
) |
461 (1ULL << IB_USER_VERBS_CMD_QUERY_QP
) |
462 (1ULL << IB_USER_VERBS_CMD_DESTROY_QP
);
464 /* HCA||device||port */
465 ib_dev
->modify_device
= hns_roce_modify_device
;
466 ib_dev
->query_device
= hns_roce_query_device
;
467 ib_dev
->query_port
= hns_roce_query_port
;
468 ib_dev
->modify_port
= hns_roce_modify_port
;
469 ib_dev
->get_link_layer
= hns_roce_get_link_layer
;
470 ib_dev
->get_netdev
= hns_roce_get_netdev
;
471 ib_dev
->query_gid
= hns_roce_query_gid
;
472 ib_dev
->add_gid
= hns_roce_add_gid
;
473 ib_dev
->del_gid
= hns_roce_del_gid
;
474 ib_dev
->query_pkey
= hns_roce_query_pkey
;
475 ib_dev
->alloc_ucontext
= hns_roce_alloc_ucontext
;
476 ib_dev
->dealloc_ucontext
= hns_roce_dealloc_ucontext
;
477 ib_dev
->mmap
= hns_roce_mmap
;
480 ib_dev
->alloc_pd
= hns_roce_alloc_pd
;
481 ib_dev
->dealloc_pd
= hns_roce_dealloc_pd
;
484 ib_dev
->create_ah
= hns_roce_create_ah
;
485 ib_dev
->query_ah
= hns_roce_query_ah
;
486 ib_dev
->destroy_ah
= hns_roce_destroy_ah
;
489 ib_dev
->create_qp
= hns_roce_create_qp
;
490 ib_dev
->modify_qp
= hns_roce_modify_qp
;
491 ib_dev
->query_qp
= hr_dev
->hw
->query_qp
;
492 ib_dev
->destroy_qp
= hr_dev
->hw
->destroy_qp
;
493 ib_dev
->post_send
= hr_dev
->hw
->post_send
;
494 ib_dev
->post_recv
= hr_dev
->hw
->post_recv
;
497 ib_dev
->create_cq
= hns_roce_ib_create_cq
;
498 ib_dev
->destroy_cq
= hns_roce_ib_destroy_cq
;
499 ib_dev
->req_notify_cq
= hr_dev
->hw
->req_notify_cq
;
500 ib_dev
->poll_cq
= hr_dev
->hw
->poll_cq
;
503 ib_dev
->get_dma_mr
= hns_roce_get_dma_mr
;
504 ib_dev
->reg_user_mr
= hns_roce_reg_user_mr
;
505 ib_dev
->dereg_mr
= hns_roce_dereg_mr
;
508 ib_dev
->get_port_immutable
= hns_roce_port_immutable
;
510 ret
= ib_register_device(ib_dev
, NULL
);
512 dev_err(dev
, "ib_register_device failed!\n");
516 ret
= hns_roce_setup_mtu_mac(hr_dev
);
518 dev_err(dev
, "setup_mtu_mac failed!\n");
519 goto error_failed_setup_mtu_mac
;
522 iboe
->nb
.notifier_call
= hns_roce_netdev_event
;
523 ret
= register_netdevice_notifier(&iboe
->nb
);
525 dev_err(dev
, "register_netdevice_notifier failed!\n");
526 goto error_failed_setup_mtu_mac
;
531 error_failed_setup_mtu_mac
:
532 ib_unregister_device(ib_dev
);
537 static const struct of_device_id hns_roce_of_match
[] = {
538 { .compatible
= "hisilicon,hns-roce-v1", .data
= &hns_roce_hw_v1
, },
541 MODULE_DEVICE_TABLE(of
, hns_roce_of_match
);
543 static const struct acpi_device_id hns_roce_acpi_match
[] = {
544 { "HISI00D1", (kernel_ulong_t
)&hns_roce_hw_v1
},
547 MODULE_DEVICE_TABLE(acpi
, hns_roce_acpi_match
);
549 static int hns_roce_node_match(struct device
*dev
, void *fwnode
)
551 return dev
->fwnode
== fwnode
;
555 platform_device
*hns_roce_find_pdev(struct fwnode_handle
*fwnode
)
559 /* get the 'device'corresponding to matching 'fwnode' */
560 dev
= bus_find_device(&platform_bus_type
, NULL
,
561 fwnode
, hns_roce_node_match
);
562 /* get the platform device */
563 return dev
? to_platform_device(dev
) : NULL
;
566 static int hns_roce_get_cfg(struct hns_roce_dev
*hr_dev
)
572 struct device
*dev
= &hr_dev
->pdev
->dev
;
573 struct device_node
*net_node
;
574 struct net_device
*netdev
= NULL
;
575 struct platform_device
*pdev
= NULL
;
576 struct resource
*res
;
578 /* check if we are compatible with the underlying SoC */
579 if (dev_of_node(dev
)) {
580 const struct of_device_id
*of_id
;
582 of_id
= of_match_node(hns_roce_of_match
, dev
->of_node
);
584 dev_err(dev
, "device is not compatible!\n");
587 hr_dev
->hw
= (struct hns_roce_hw
*)of_id
->data
;
589 dev_err(dev
, "couldn't get H/W specific DT data!\n");
592 } else if (is_acpi_device_node(dev
->fwnode
)) {
593 const struct acpi_device_id
*acpi_id
;
595 acpi_id
= acpi_match_device(hns_roce_acpi_match
, dev
);
597 dev_err(dev
, "device is not compatible!\n");
600 hr_dev
->hw
= (struct hns_roce_hw
*) acpi_id
->driver_data
;
602 dev_err(dev
, "couldn't get H/W specific ACPI data!\n");
606 dev_err(dev
, "can't read compatibility data from DT or ACPI\n");
610 /* get the mapped register base address */
611 res
= platform_get_resource(hr_dev
->pdev
, IORESOURCE_MEM
, 0);
613 dev_err(dev
, "memory resource not found!\n");
616 hr_dev
->reg_base
= devm_ioremap_resource(dev
, res
);
617 if (IS_ERR(hr_dev
->reg_base
))
618 return PTR_ERR(hr_dev
->reg_base
);
620 /* read the node_guid of IB device from the DT or ACPI */
621 ret
= device_property_read_u8_array(dev
, "node-guid",
622 (u8
*)&hr_dev
->ib_dev
.node_guid
,
625 dev_err(dev
, "couldn't get node_guid from DT or ACPI!\n");
629 /* get the RoCE associated ethernet ports or netdevices */
630 for (i
= 0; i
< HNS_ROCE_MAX_PORTS
; i
++) {
631 if (dev_of_node(dev
)) {
632 net_node
= of_parse_phandle(dev
->of_node
, "eth-handle",
636 pdev
= of_find_device_by_node(net_node
);
637 } else if (is_acpi_device_node(dev
->fwnode
)) {
638 struct acpi_reference_args args
;
639 struct fwnode_handle
*fwnode
;
641 ret
= acpi_node_get_property_reference(dev
->fwnode
,
646 fwnode
= acpi_fwnode_handle(args
.adev
);
647 pdev
= hns_roce_find_pdev(fwnode
);
649 dev_err(dev
, "cannot read data from DT or ACPI\n");
654 netdev
= platform_get_drvdata(pdev
);
657 hr_dev
->iboe
.netdevs
[port_cnt
] = netdev
;
658 hr_dev
->iboe
.phy_port
[port_cnt
] = phy_port
;
660 dev_err(dev
, "no netdev found with pdev %s\n",
669 dev_err(dev
, "unable to get eth-handle for available ports!\n");
673 hr_dev
->caps
.num_ports
= port_cnt
;
675 /* cmd issue mode: 0 is poll, 1 is event */
677 hr_dev
->loop_idc
= 0;
679 /* read the interrupt names from the DT or ACPI */
680 ret
= device_property_read_string_array(dev
, "interrupt-names",
682 HNS_ROCE_MAX_IRQ_NUM
);
684 dev_err(dev
, "couldn't get interrupt names from DT or ACPI!\n");
688 /* fetch the interrupt numbers */
689 for (i
= 0; i
< HNS_ROCE_MAX_IRQ_NUM
; i
++) {
690 hr_dev
->irq
[i
] = platform_get_irq(hr_dev
->pdev
, i
);
691 if (hr_dev
->irq
[i
] <= 0) {
692 dev_err(dev
, "platform get of irq[=%d] failed!\n", i
);
700 static int hns_roce_init_hem(struct hns_roce_dev
*hr_dev
)
703 struct device
*dev
= &hr_dev
->pdev
->dev
;
705 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->mr_table
.mtt_table
,
706 HEM_TYPE_MTT
, hr_dev
->caps
.mtt_entry_sz
,
707 hr_dev
->caps
.num_mtt_segs
, 1);
709 dev_err(dev
, "Failed to init MTT context memory, aborting.\n");
713 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->mr_table
.mtpt_table
,
714 HEM_TYPE_MTPT
, hr_dev
->caps
.mtpt_entry_sz
,
715 hr_dev
->caps
.num_mtpts
, 1);
717 dev_err(dev
, "Failed to init MTPT context memory, aborting.\n");
721 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->qp_table
.qp_table
,
722 HEM_TYPE_QPC
, hr_dev
->caps
.qpc_entry_sz
,
723 hr_dev
->caps
.num_qps
, 1);
725 dev_err(dev
, "Failed to init QP context memory, aborting.\n");
729 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->qp_table
.irrl_table
,
731 hr_dev
->caps
.irrl_entry_sz
*
732 hr_dev
->caps
.max_qp_init_rdma
,
733 hr_dev
->caps
.num_qps
, 1);
735 dev_err(dev
, "Failed to init irrl_table memory, aborting.\n");
739 ret
= hns_roce_init_hem_table(hr_dev
, &hr_dev
->cq_table
.table
,
740 HEM_TYPE_CQC
, hr_dev
->caps
.cqc_entry_sz
,
741 hr_dev
->caps
.num_cqs
, 1);
743 dev_err(dev
, "Failed to init CQ context memory, aborting.\n");
750 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->qp_table
.irrl_table
);
753 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->qp_table
.qp_table
);
756 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->mr_table
.mtpt_table
);
759 hns_roce_cleanup_hem_table(hr_dev
, &hr_dev
->mr_table
.mtt_table
);
765 * hns_roce_setup_hca - setup host channel adapter
766 * @hr_dev: pointer to hns roce device
769 static int hns_roce_setup_hca(struct hns_roce_dev
*hr_dev
)
772 struct device
*dev
= &hr_dev
->pdev
->dev
;
774 spin_lock_init(&hr_dev
->sm_lock
);
775 spin_lock_init(&hr_dev
->bt_cmd_lock
);
777 ret
= hns_roce_init_uar_table(hr_dev
);
779 dev_err(dev
, "Failed to initialize uar table. aborting\n");
783 ret
= hns_roce_uar_alloc(hr_dev
, &hr_dev
->priv_uar
);
785 dev_err(dev
, "Failed to allocate priv_uar.\n");
786 goto err_uar_table_free
;
789 ret
= hns_roce_init_pd_table(hr_dev
);
791 dev_err(dev
, "Failed to init protected domain table.\n");
792 goto err_uar_alloc_free
;
795 ret
= hns_roce_init_mr_table(hr_dev
);
797 dev_err(dev
, "Failed to init memory region table.\n");
798 goto err_pd_table_free
;
801 ret
= hns_roce_init_cq_table(hr_dev
);
803 dev_err(dev
, "Failed to init completion queue table.\n");
804 goto err_mr_table_free
;
807 ret
= hns_roce_init_qp_table(hr_dev
);
809 dev_err(dev
, "Failed to init queue pair table.\n");
810 goto err_cq_table_free
;
816 hns_roce_cleanup_cq_table(hr_dev
);
819 hns_roce_cleanup_mr_table(hr_dev
);
822 hns_roce_cleanup_pd_table(hr_dev
);
825 hns_roce_uar_free(hr_dev
, &hr_dev
->priv_uar
);
828 hns_roce_cleanup_uar_table(hr_dev
);
833 * hns_roce_probe - RoCE driver entrance
834 * @pdev: pointer to platform device
838 static int hns_roce_probe(struct platform_device
*pdev
)
841 struct hns_roce_dev
*hr_dev
;
842 struct device
*dev
= &pdev
->dev
;
844 hr_dev
= (struct hns_roce_dev
*)ib_alloc_device(sizeof(*hr_dev
));
849 platform_set_drvdata(pdev
, hr_dev
);
851 if (dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64ULL)) &&
852 dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(32ULL))) {
853 dev_err(dev
, "Not usable DMA addressing mode\n");
855 goto error_failed_get_cfg
;
858 ret
= hns_roce_get_cfg(hr_dev
);
860 dev_err(dev
, "Get Configuration failed!\n");
861 goto error_failed_get_cfg
;
864 ret
= hr_dev
->hw
->reset(hr_dev
, true);
866 dev_err(dev
, "Reset RoCE engine failed!\n");
867 goto error_failed_get_cfg
;
870 hr_dev
->hw
->hw_profile(hr_dev
);
872 ret
= hns_roce_cmd_init(hr_dev
);
874 dev_err(dev
, "cmd init failed!\n");
875 goto error_failed_cmd_init
;
878 ret
= hns_roce_init_eq_table(hr_dev
);
880 dev_err(dev
, "eq init failed!\n");
881 goto error_failed_eq_table
;
884 if (hr_dev
->cmd_mod
) {
885 ret
= hns_roce_cmd_use_events(hr_dev
);
887 dev_err(dev
, "Switch to event-driven cmd failed!\n");
888 goto error_failed_use_event
;
892 ret
= hns_roce_init_hem(hr_dev
);
894 dev_err(dev
, "init HEM(Hardware Entry Memory) failed!\n");
895 goto error_failed_init_hem
;
898 ret
= hns_roce_setup_hca(hr_dev
);
900 dev_err(dev
, "setup hca failed!\n");
901 goto error_failed_setup_hca
;
904 ret
= hr_dev
->hw
->hw_init(hr_dev
);
906 dev_err(dev
, "hw_init failed!\n");
907 goto error_failed_engine_init
;
910 ret
= hns_roce_register_device(hr_dev
);
912 goto error_failed_register_device
;
916 error_failed_register_device
:
917 hr_dev
->hw
->hw_exit(hr_dev
);
919 error_failed_engine_init
:
920 hns_roce_cleanup_bitmap(hr_dev
);
922 error_failed_setup_hca
:
923 hns_roce_cleanup_hem(hr_dev
);
925 error_failed_init_hem
:
927 hns_roce_cmd_use_polling(hr_dev
);
929 error_failed_use_event
:
930 hns_roce_cleanup_eq_table(hr_dev
);
932 error_failed_eq_table
:
933 hns_roce_cmd_cleanup(hr_dev
);
935 error_failed_cmd_init
:
936 ret
= hr_dev
->hw
->reset(hr_dev
, false);
938 dev_err(&hr_dev
->pdev
->dev
, "roce_engine reset fail\n");
940 error_failed_get_cfg
:
941 ib_dealloc_device(&hr_dev
->ib_dev
);
947 * hns_roce_remove - remove RoCE device
948 * @pdev: pointer to platform device
950 static int hns_roce_remove(struct platform_device
*pdev
)
952 struct hns_roce_dev
*hr_dev
= platform_get_drvdata(pdev
);
954 hns_roce_unregister_device(hr_dev
);
955 hr_dev
->hw
->hw_exit(hr_dev
);
956 hns_roce_cleanup_bitmap(hr_dev
);
957 hns_roce_cleanup_hem(hr_dev
);
960 hns_roce_cmd_use_polling(hr_dev
);
962 hns_roce_cleanup_eq_table(hr_dev
);
963 hns_roce_cmd_cleanup(hr_dev
);
964 hr_dev
->hw
->reset(hr_dev
, false);
966 ib_dealloc_device(&hr_dev
->ib_dev
);
971 static struct platform_driver hns_roce_driver
= {
972 .probe
= hns_roce_probe
,
973 .remove
= hns_roce_remove
,
976 .of_match_table
= hns_roce_of_match
,
977 .acpi_match_table
= ACPI_PTR(hns_roce_acpi_match
),
981 module_platform_driver(hns_roce_driver
);
983 MODULE_LICENSE("Dual BSD/GPL");
984 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
985 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
986 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
987 MODULE_DESCRIPTION("HNS RoCE Driver");