/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include "socklnd.h"

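/*
 * Cache the socket's addresses on the connection: the peer's IP and port
 * in ksnc_ipaddr/ksnc_port, and the local IP in ksnc_myipaddr.  Returns
 * 0 on success or the error from libcfs_sock_getaddr().
 */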
int
ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
{
        int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
                                     &conn->ksnc_ipaddr,
                                     &conn->ksnc_port);

        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT(!conn->ksnc_closing);

        if (rc != 0) {
                CERROR("Error %d getting sock peer IP\n", rc);
                return rc;
        }

        rc = libcfs_sock_getaddr(conn->ksnc_sock, 0,
                                 &conn->ksnc_myipaddr, NULL);
        if (rc != 0) {
                CERROR("Error %d getting sock local IP\n", rc);
                return rc;
        }

        return 0;
}

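/*
 * A connection can use zero-copy sends only when the protocol is not
 * V1.x and the route supports scatter/gather without needing software
 * checksums, so page fragments can go straight to the NIC untouched.
 */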
int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
        int caps = conn->ksnc_sock->sk->sk_route_caps;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x)
                return 0;

        /* ZC if the socket supports scatter/gather and doesn't need
         * software checksums */
        return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
}

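/*
 * Send the iovec portion of 'tx' (the message header and any immediate
 * payload).  The iovs are copied into per-scheduler scratch space first
 * because the socket may modify them.  Returns the byte count from
 * kernel_sendmsg() or a negative errno.
 */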
int
ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int nob;
        int rc;

        if (*ksocknal_tunables.ksnd_enable_csum &&        /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
            tx->tx_nob == tx->tx_resid &&                 /* first sending */
            tx->tx_msg.ksm_csum == 0)                     /* not checksummed */
                ksocknal_lib_csum_tx(tx);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */

        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct iovec scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int niov = tx->tx_niov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i] = tx->tx_iov[i];
                        nob += scratchiov[i].iov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
                                    niov, nob);
        }
        return rc;
}

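/*
 * Send the page (kiov) portion of 'tx'.  When a zero-copy cookie is set,
 * the first fragment goes out via ->sendpage(), which pins the page
 * rather than copying it; otherwise every fragment is kmap()ed into
 * scratch iovs and sent with kernel_sendmsg().
 */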
int
ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t *kiov = tx->tx_kiov;
        int rc;
        int nob;

        /* Not a NOOP message */
        LASSERT(tx->tx_lnetmsg != NULL);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
                /* Zero copy is enabled */
                struct sock *sk = sock->sk;
                struct page *page = kiov->kiov_page;
                int offset = kiov->kiov_offset;
                int fragsize = kiov->kiov_len;
                int msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, kiov->kiov_len);

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                if (sk->sk_prot->sendpage != NULL) {
                        rc = sk->sk_prot->sendpage(sk, page,
                                                   offset, fragsize, msgflg);
                } else {
                        rc = cfs_tcp_sendpage(sk, page, offset, fragsize,
                                              msgflg);
                }
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct iovec scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int niov = tx->tx_nkiov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
                                    niov, nob);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }
        return rc;
}

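/*
 * Force an immediate ACK with TCP_QUICKACK.  This is a one-shot toggle:
 * it disables delayed ACK for data already received, not permanently.
 * Without it, the peer's zero-copy sends could stall on our delayed-ACK
 * timer.
 */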
void
ksocknal_lib_eager_ack(ksock_conn_t *conn)
{
        int opt = 1;
        mm_segment_t oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly.  If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer. */

        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_QUICKACK,
                              (char *)&opt, sizeof(opt));
        set_fs(oldmm);
}

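/*
 * Receive into the connection's iovec buffers (message headers).  For
 * V2.x connections the received bytes are folded into the running
 * receive checksum; ksm_csum is zeroed while accumulating so the value
 * matches the sender's (who checksummed with a zero csum field), then
 * restored for the eventual compare.
 */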
int
ksocknal_lib_recv_iov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        int fragnob;
        int sum;
        __u32 saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        LASSERT(niov > 0);

        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg,
                            (struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base,
                                                           fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}

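/*
 * Helpers for zero-copy receive.  ksocknal_lib_kiov_vmap() maps a run of
 * fragments into one virtually contiguous buffer with vmap(), provided
 * only the first fragment may start mid-page and only the last may end
 * early; the recv can then land in a single iov.
 * ksocknal_lib_kiov_vunmap() undoes the mapping.
 */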
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (addr == NULL)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
                       struct iovec *iov, struct page **pages)
{
        void *addr;
        int nob;
        int i;

        if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
                return NULL;

        LASSERT(niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset != 0 && i > 0) ||
                    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE &&
                     i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].kiov_page;
                nob += kiov[i].kiov_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (addr == NULL)
                return NULL;

        iov->iov_base = addr + kiov[0].kiov_offset;
        iov->iov_len = nob;

        return addr;
}

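/*
 * Receive into the connection's page (kiov) buffers.  A single vmap()ed
 * iov is preferred when the fragments qualify; otherwise each page is
 * kmap()ed into scratch iovs.  The checksum pass kmap()s again, which
 * is cheap because the pages are still mapped at that point.
 */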
int
ksocknal_lib_recv_kiov(ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec scratch;
        struct iovec *scratchiov = &scratch;
        struct page **pages = NULL;
        unsigned int niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
        unsigned int niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
                .msg_flags = 0
        };
        int nob;
        int i;
        int rc;
        void *base;
        void *addr;
        int sum;
        int fragnob;
        int n;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
        if (addr != NULL) {
                nob = scratchiov[0].iov_len;
                n = 1;
        } else {
                for (nob = i = 0; i < niov; i++) {
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                }
                n = niov;
        }

        LASSERT(nob <= conn->ksnc_rx_nob_wanted);

        rc = kernel_recvmsg(conn->ksnc_sock, &msg,
                            (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);

        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT(i < niov);

                        /* Dang! have to kmap again because I have nowhere to
                         * stash the mapped address.  But by doing it while the
                         * page is still mapped, the kernel just bumps the map
                         * count and returns me the address it stashed. */
                        base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                        fragnob = kiov[i].kiov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);

                        kunmap(kiov[i].kiov_page);
                }
        }

        if (addr != NULL) {
                ksocknal_lib_kiov_vunmap(addr);
        } else {
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }

        return rc;
}

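/*
 * Checksum an outgoing message.  The first iov is the ksock message
 * header itself; ksm_csum is zeroed before the sum is taken so the
 * receiver can recompute it the same way, and the final value is stored
 * back into the header afterwards.
 */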
void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
        int i;
        __u32 csum;
        void *base;

        LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
        LASSERT(tx->tx_conn != NULL);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
                             tx->tx_iov[0].iov_len);

        if (tx->tx_kiov != NULL) {
                for (i = 0; i < tx->tx_nkiov; i++) {
                        base = kmap(tx->tx_kiov[i].kiov_page) +
                               tx->tx_kiov[i].kiov_offset;

                        csum = ksocknal_csum(csum, base,
                                             tx->tx_kiov[i].kiov_len);

                        kunmap(tx->tx_kiov[i].kiov_page);
                }
        } else {
                for (i = 1; i < tx->tx_niov; i++)
                        csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
                                             tx->tx_iov[i].iov_len);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}

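/*
 * Report the socket's current buffer sizes and Nagle state.  *nagle is
 * the inverse of TCP_NODELAY, i.e. non-zero means Nagle is enabled.
 * Returns -ESHUTDOWN if the connection is already closing.
 */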
int
ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem,
                               int *nagle)
{
        mm_segment_t oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;
        int len;
        int rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT(conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }

        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                len = sizeof(*nagle);
                set_fs(KERNEL_DS);
                rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                           (char *)nagle, &len);
                set_fs(oldmm);
        }

        ksocknal_connsock_decref(conn);

        if (rc == 0)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;

        return rc;
}

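/*
 * Apply the tunable socket options to a freshly created socket: linger
 * behaviour, Nagle, buffer sizes, and (only if all three keepalive
 * tunables are positive) TCP keepalive probing.
 */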
int
ksocknal_lib_setup_sock(struct socket *sock)
{
        mm_segment_t oldmm = get_fs();
        int rc;
        int option;
        int keep_idle;
        int keep_intvl;
        int keep_count;
        int do_keepalive;
        struct linger linger;

        sock->sk->sk_allocation = GFP_NOFS;

        /* Ensure this socket aborts active sends immediately when we close
         * it. */

        linger.l_onoff = 0;
        linger.l_linger = 0;

        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_LINGER,
                             (char *)&linger, sizeof(linger));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set SO_LINGER: %d\n", rc);
                return rc;
        }

        option = -1;
        set_fs(KERNEL_DS);
        rc = sock->ops->setsockopt(sock, SOL_TCP, TCP_LINGER2,
                                   (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set TCP_LINGER2: %d\n", rc);
                return rc;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;

                set_fs(KERNEL_DS);
                rc = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
                                           (char *)&option, sizeof(option));
                set_fs(oldmm);
                if (rc != 0) {
                        CERROR("Can't disable nagle: %d\n", rc);
                        return rc;
                }
        }

        rc = libcfs_sock_setbuf(sock,
                                *ksocknal_tunables.ksnd_tx_buffer_size,
                                *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc != 0) {
                CERROR("Can't set tx buffer %d, rx buffer %d: %d\n",
                       *ksocknal_tunables.ksnd_tx_buffer_size,
                       *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return rc;
        }

        /* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */

        /* snapshot tunables */
        keep_idle = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

        option = (do_keepalive ? 1 : 0);
        set_fs(KERNEL_DS);
        rc = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
                             (char *)&option, sizeof(option));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
                return rc;
        }

        if (!do_keepalive)
                return 0;

        set_fs(KERNEL_DS);
        rc = sock->ops->setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
                                   (char *)&keep_idle, sizeof(keep_idle));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }

        set_fs(KERNEL_DS);
        rc = sock->ops->setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
                                   (char *)&keep_intvl, sizeof(keep_intvl));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }

        set_fs(KERNEL_DS);
        rc = sock->ops->setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
                                   (char *)&keep_count, sizeof(keep_count));
        set_fs(oldmm);
        if (rc != 0) {
                CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }

        return 0;
}

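/*
 * Flush anything queued in the socket.  Setting TCP_NODELAY forces
 * pending data out even with Nagle enabled, so the previous nonagle
 * state is saved and restored around the setsockopt() call.
 */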
void
ksocknal_lib_push_conn(ksock_conn_t *conn)
{
        struct sock *sk;
        struct tcp_sock *tp;
        int nonagle;
        int val = 1;
        int rc;
        mm_segment_t oldmm;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)                    /* being shut down */
                return;

        sk = conn->ksnc_sock->sk;
        tp = tcp_sk(sk);

        lock_sock(sk);
        nonagle = tp->nonagle;
        tp->nonagle = 1;
        release_sock(sk);

        oldmm = get_fs();
        set_fs(KERNEL_DS);

        rc = sk->sk_prot->setsockopt(sk, SOL_TCP, TCP_NODELAY,
                                     (char *)&val, sizeof(val));
        LASSERT(rc == 0);

        set_fs(oldmm);

        lock_sock(sk);
        tp->nonagle = nonagle;
        release_sock(sk);

        ksocknal_connsock_decref(conn);
}

extern void ksocknal_read_callback(ksock_conn_t *conn);
extern void ksocknal_write_callback(ksock_conn_t *conn);
/*
 * Socket callbacks in Linux
 */
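/*
 * Data-ready callback, invoked when bytes arrive (never from hard IRQ,
 * per the LASSERT).  It takes ksnd_global_lock for reading to serialise
 * against connection teardown; if sk_user_data has already been cleared
 * by ksocknal_terminate_conn, the restored original callback runs
 * instead.
 */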
static void
ksocknal_data_ready(struct sock *sk, int n)
{
        ksock_conn_t *conn;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (conn == NULL) {        /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
                sk->sk_data_ready(sk, n);
        } else {
                ksocknal_read_callback(conn);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

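/*
 * Write-space callback, invoked when transmit space frees up.  If enough
 * space is available, the connection is rescheduled and SOCK_NOSPACE is
 * cleared afterwards, keeping the -ENOMEM retry check (see
 * ksocknal_lib_memory_pressure() below) race-free.
 */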
static void
ksocknal_write_space(struct sock *sk)
{
        ksock_conn_t *conn;
        int wspace;
        int min_wspace;

        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = SOCKNAL_WSPACE(sk);
        min_wspace = SOCKNAL_MIN_WSPACE(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wspace, conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) {        /* raced with ksocknal_terminate_conn */
                LASSERT(sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space(sk);

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wspace) {        /* got enough space */
                ksocknal_write_callback(conn);

                /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it). */

                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}

void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
        sock->sk->sk_write_space = ksocknal_write_space;
}

void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
        /* Remove conn's network callbacks.
         * NB I _have_ to restore the callback, rather than storing a noop,
         * since the socket could survive past this module being unloaded!! */
        sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
        sock->sk->sk_write_space = conn->ksnc_saved_write_space;

        /* A callback could be in progress already; they hold a read lock
         * on ksnd_global_lock (to serialise with me) and NOOP if
         * sk_user_data is NULL. */
        sock->sk->sk_user_data = NULL;
}

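/*
 * Decide whether a short send was caused by socket back-pressure or by
 * genuine memory pressure.  Only the latter returns -ENOMEM, since in
 * that case no write_space callback will arrive to reschedule the
 * connection.
 */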
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
        int rc = 0;
        ksock_sched_t *sched;

        sched = conn->ksnc_scheduler;
        spin_lock_bh(&sched->kss_lock);

        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
            !conn->ksnc_tx_ready) {
                /* SOCK_NOSPACE is set when the socket fills and cleared in
                 * the write_space callback (which also sets ksnc_tx_ready).
                 * If SOCK_NOSPACE and ksnc_tx_ready are BOTH zero, I didn't
                 * fill the socket and write_space won't reschedule me, so I
                 * return -ENOMEM to get my caller to retry after a
                 * timeout. */
                rc = -ENOMEM;
        }

        spin_unlock_bh(&sched->kss_lock);

        return rc;
}