// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST	(8 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt;	/* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

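/* A link group is not freed as soon as its last connection goes away;
 * the delayed free_work is (re)scheduled instead, so the lgr can still be
 * reused by a follow-on connection. The client delay
 * (SMC_LGR_FREE_DELAY_CLNT) is 10 seconds longer than the server's, since
 * client link group creation always follows the server's, and freeing in
 * the same order avoids out-of-sync link groups.
 */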
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
	}
}

void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn: connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
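
/* Note: the reverse lookup - mapping the alert token carried in an
 * incoming CDC message back to its smc_connection - is done by
 * smc_lgr_find_conn() searching this same rbtree, also under @conns_lock.
 */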

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	struct smc_link *lnk;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	if (!lgr->is_smcd && !lgr->terminating) {
		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk, true)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			spin_unlock_bh(lgr_lock);
			return;
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
		smc_llc_link_inactive(lnk);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	smc_lgr_terminate(lgr, true);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 rndvec[3];
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		get_device(&ini->ib_dev->ibdev->dev);
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
		if (!ini->ib_dev->initialized)
			smc_ib_setup_per_ibdev(ini->ib_dev);
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
			(rndvec[2] << 16);
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
					  ini->vlan_id, lnk->gid,
					  &lnk->sgid_index);
		if (rc)
			goto free_lgr;
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
		atomic_inc(&lgr_cnt);
		atomic_inc(&ini->ib_dev->lnk_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc) {
		if (!conn->rmb_desc->regerr) {
			if (!lgr->is_smcd && !list_empty(&lgr->list)) {
				/* unregister rmb with peer */
				smc_llc_do_delete_rkey(
						&lgr->lnk[SMC_SINGLE_LINK],
						conn->rmb_desc);
			}
			conn->rmb_desc->used = 0;
		} else {
			/* buf registration failed, reuse not possible */
			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

			smc_buf_free(lgr, true, conn->rmb_desc);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
		wake_up(&lnk->smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
}

static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		wake_up(&lnk->wr_reg_wait);
		if (lnk->state != SMC_LNK_INACTIVE) {
			smc_link_send_delete(lnk, false);
			smc_llc_link_inactive(lnk);
		}
	}
}

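/* Link group termination comes in two flavors: a soft termination defers
 * the actual freeing to the fast-scheduled free_work, while a hard
 * termination (soft == false) cancels any pending free_work and frees the
 * link group synchronously - the path taken when a device goes away.
 */
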
/* terminate link group */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	if (!soft)
		cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;
	if (!lgr->is_smcd)
		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
}

/* unlink and terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->terminating) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	if (!soft)
		lgr->freeing = 1;
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, soft);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (!lgr->is_smcd &&
		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
				list_move(&lgr->list, &lgr_free_list);
				lgr->freeing = 1;
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}

/* Determine vlan of internal TCP socket.
 * The determined vlan id is stored into ini->vlan_id.
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

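/* An existing link group is reused only if the peer matches (system id,
 * GID and MAC - plus, on the client side, the QP number - for SMC-R;
 * peer GID and ISM device for SMC-D), the VLAN id matches, no sync error
 * is pending, and, on the server side, the lgr still has room for another
 * connection (SMC_RMBS_PER_LGR_MAX).
 */
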
/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			if (delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}

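/* Worked example of the round trip: a requested size of 80000 bytes
 * compresses to ilog2((80000 - 1) >> 14) + 1 = ilog2(4) + 1 = 3, and
 * smc_uncompress_bufsize(3) = 1 << (3 + 14) = 131072, i.e. the next
 * power of 2 >= 80000, with a floor of 16K.
 */
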
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}

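/* Note: only the list walk above is protected by the read lock; a slot is
 * claimed via cmpxchg(&buf_slot->used, 0, 1), so two concurrent callers
 * can never grab the same buffer.
 */
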
/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

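/* Example: for a 16384 byte RMB the limit is min(1638, SOCK_MIN_SNDBUF / 2),
 * i.e. a window update is only announced once the window can grow by
 * roughly 10% of the receive buffer space, capped at half the minimum
 * send buffer size.
 */
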
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(-EAGAIN);
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
			smc_buf_free(lgr, is_rmb, buf_desc);
			return ERR_PTR(rc);
		}
	}

	buf_desc->len = bufsize;
	return buf_desc;
}

#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

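/* Allocate (or reuse) a buffer for a connection, searching downwards from
 * the compressed size that matches half the socket buffer size: each failed
 * attempt retries with the next smaller power of 2, so memory pressure
 * shrinks the buffer instead of failing the connection; only a hard
 * -ENOMEM aborts the search.
 */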
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

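/* rtokens cache the peer's RMB credentials (rkey and RDMA address) per
 * link group. An rtoken index is assigned to a connection during the CLC
 * handshake (see smc_rmb_rtoken_handling() below) and later identifies
 * the peer's RMB as the target of RDMA writes.
 */
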
/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

1270
5421ec28
UB
1271/* Clean up all SMC link groups */
1272static void smc_lgrs_shutdown(void)
9fda3510 1273{
a2351c5d 1274 struct smcd_dev *smcd;
9fda3510 1275
c3d9494e
UB
1276 smc_core_going_away();
1277
0b29ec64 1278 smc_smcr_terminate_all(NULL);
a2351c5d
UB
1279
1280 spin_lock(&smcd_dev_list.lock);
1281 list_for_each_entry(smcd, &smcd_dev_list.list, list)
5421ec28 1282 smc_smcd_terminate_all(smcd);
a2351c5d 1283 spin_unlock(&smcd_dev_list.lock);
9fda3510 1284}
5421ec28 1285
static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();

	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	atomic_set(&lgr_cnt, 0);
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}