// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */
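
/* A link's reliable-connected QP is moved through the verbs state machine
 * RESET -> INIT -> RTR -> RTS before it can carry traffic; each helper
 * below performs one of these transitions.
 */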
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
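
/* move the link QP to Ready-To-Receive state, supplying the peer's GID,
 * MAC address, QP number and starting receive packet sequence number
 */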
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
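
/* move the link QP to Ready-To-Send state, setting the local ack timeout,
 * retry counts and the starting send packet sequence number
 */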
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
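
/* move the link QP back to RESET state */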
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
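
/* bring a link's QP into a usable state; the server side moves the QP all
 * the way to RTS here, while callers elsewhere move the client QP to RTS
 * once the link is confirmed
 */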
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr =
		container_of(lnk, struct smc_link_group, lnk[0]);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
	}
}
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		/* fall through */
	case IB_EVENT_DEVICE_FATAL:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}
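
/* allocate a protection domain for a link on its IB device */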
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
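
/* asynchronous QP event callback, registered with the IB core at QP
 * creation time
 */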
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	default:
		break;
	}
}
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
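
/* deregister a memory region obtained via smc_ib_get_memory_region() */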
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);
	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
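
/* Unmap a TX or RX buffer SG-table from DMA */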
void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}
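
/* determine and cache the gid and MAC address of an IB device port */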
static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct net_device *ndev;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
			  &smcibdev->gid[ibport - 1], NULL);
	/* the SMC protocol requires specification of the roce MAC address;
	 * if net_device cannot be determined, it can be derived from gid 0
	 */
	ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
	if (ndev) {
		memcpy(&smcibdev->mac, ndev->dev_addr, ETH_ALEN);
		dev_put(ndev);
	} else if (!rc) {
		memcpy(&smcibdev->mac[ibport - 1][0],
		       &smcibdev->gid[ibport - 1].raw[8], 3);
		memcpy(&smcibdev->mac[ibport - 1][3],
		       &smcibdev->gid[ibport - 1].raw[13], 3);
		smcibdev->mac[ibport - 1][0] &= ~0x02;
	}
	return rc;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}
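
/* check if a given port of an IB device is in active state */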
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
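
/* query and cache the attributes, gid and MAC address of an IB device
 * port; may sleep and must therefore run in process context
 */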
int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
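
/* set up the per-device resources: send and receive completion queues,
 * the global event handler and the work request layer
 */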
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
	long rc;

	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
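
/* release the per-device resources created by smc_ib_setup_per_ibdev() */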
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smc_wr_remove_dev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}
static struct ib_client smc_ib_client;
/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
}
/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};
int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}
void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}