/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
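
/* QP tuning constants, using the InfiniBand verbs encodings: the local
 * ACK timeout is 4.096 usec * 2^SMC_QP_TIMEOUT, and an RNR retry value
 * of 7 requests indefinite retries.
 */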
#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */
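
/* Obtain a DMA memory region for a protection domain, unless one exists
 * already. get_dma_mr() registers the whole address space for DMA, and
 * each invocation returns a region with a fresh key.
 */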
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct ib_mr **mr)
{
	int rc;

	if (*mr)
		return 0; /* already done */

	/* obtain unique key -
	 * next invocation of get_dma_mr returns a different key!
	 */
	*mr = pd->device->get_dma_mr(pd, access_flags);
	rc = PTR_ERR_OR_ZERO(*mr);
	if (IS_ERR(*mr))
		*mr = NULL;
	return rc;
}
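
/* A reliable-connected QP must be advanced through the verbs state
 * machine RESET -> INIT -> RTR -> RTS before it can transfer data; the
 * smc_ib_modify_qp_*() helpers below perform these transitions, each
 * with the attribute mask required for its target state.
 */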
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				  IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
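
/* Ready-to-receive: provide the peer's addressing information (GID, MAC,
 * QP number and initial packet sequence number) so that incoming
 * requests can be accepted on this QP.
 */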
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
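
/* Bring a new link's QP into operation: advance it to INIT and RTR, arm
 * the receive completion queue, and post the initial receive work
 * requests. Only the server side advances the QP to RTS here; the client
 * completes that transition later during connection setup.
 */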
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr =
		container_of(lnk, struct smc_link_group, lnk[0]);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
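
/* Port events are delivered by the IB core's event handler, which may run
 * in IRQ context, while re-reading the port attributes may sleep. The
 * handler therefore only flags the affected port in port_event_mask and
 * defers the update to the work item below.
 */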
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		/* fall through */
	case IB_EVENT_DEVICE_FATAL:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}
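
/* A protection domain ties a link's QP and its memory regions together;
 * the hardware rejects accesses through a QP to memory that is not
 * registered under the same PD.
 */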
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
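
/* Each link uses a single reliable-connected (RC) QP. With
 * IB_SIGNAL_REQ_WR, send completions are generated only for explicitly
 * signaled work requests; the queue dimensions are derived from
 * SMC_WR_BUF_CNT as explained in the comments below.
 */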
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
			.max_send_wr = SMC_WR_BUF_CNT,
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
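
/* A buffer is handed over to the device by mapping it for DMA; the
 * dma_addr[] entry doubles as the "is mapped" indicator, which makes
 * smc_ib_buf_map() and smc_ib_buf_unmap() idempotent.
 */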
/* map a new TX or RX buffer to DMA */
int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
		   struct smc_buf_desc *buf_slot,
		   enum dma_data_direction data_direction)
{
	int rc = 0;

	if (buf_slot->dma_addr[SMC_SINGLE_LINK])
		return rc; /* already mapped */
	buf_slot->dma_addr[SMC_SINGLE_LINK] =
		ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr,
				  buf_size, data_direction);
	if (ib_dma_mapping_error(smcibdev->ibdev,
				 buf_slot->dma_addr[SMC_SINGLE_LINK]))
		rc = -EIO;
	return rc;
}

void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	if (!buf_slot->dma_addr[SMC_SINGLE_LINK])
		return; /* already unmapped */
	ib_dma_unmap_single(smcibdev->ibdev, *buf_slot->dma_addr, buf_size,
			    data_direction);
	buf_slot->dma_addr[SMC_SINGLE_LINK] = 0;
}
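
/* For RoCE, GID 0 is derived from the port's MAC address in EUI-64
 * fashion: the two MAC halves surround the bytes ff:fe, and the
 * locally-administered bit is inverted. Hence the MAC can be recovered
 * from the GID when no net_device is available for the port.
 */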
static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct net_device *ndev;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
			  &smcibdev->gid[ibport - 1], NULL);
	/* the SMC protocol requires specification of the roce MAC address;
	 * if net_device cannot be determined, it can be derived from gid 0
	 */
	ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
	if (ndev) {
		memcpy(&smcibdev->mac[ibport - 1][0], ndev->dev_addr,
		       ETH_ALEN);
		dev_put(ndev); /* drop reference taken in get_netdev() */
	} else if (!rc) {
		memcpy(&smcibdev->mac[ibport - 1][0],
		       &smcibdev->gid[ibport - 1].raw[8], 3);
		memcpy(&smcibdev->mac[ibport - 1][3],
		       &smcibdev->gid[ibport - 1].raw[13], 3);
		smcibdev->mac[ibport - 1][0] &= ~0x02;
	}
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
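
/* (Re-)read the attributes, GID, and MAC address of an IB port into the
 * per-port cache; the first time an active port is encountered, it also
 * seeds the unique local system identifier.
 */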
int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
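
/* Per-device setup: create one send and one receive completion queue that
 * are shared by all links using this IB device, and register a handler
 * for device-global events.
 */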
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
	long rc;

	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
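
/* Release the per-device resources in reverse order of their setup;
 * "initialized" guards against tearing down a device for which
 * smc_ib_setup_per_ibdev() never completed.
 */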
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smc_wr_remove_dev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}

static struct ib_client smc_ib_client;
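
/* ib_register_client() invokes the add callback for all RDMA devices
 * already known to the IB core and for every device registered
 * afterwards; the remove callback is its counterpart on device removal.
 */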
/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}