// SPDX-License-Identifier: GPL-2.0-only
/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
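/*
 * Usage note (added for clarity): iucv_sock_wait() re-evaluates the
 * condition under the socket lock and sleeps with the lock dropped,
 * e.g. as in iucv_sock_connect() below:
 *
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						    IUCV_DISCONN),
 *			     sock_sndtimeo(sk, flags & O_NONBLOCK));
 */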
static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
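/*
 * Note on the two flow-control regimes above (added for clarity): for
 * classic IUCV the path's msglim caps the number of skbs queued on
 * send_skb_q; for HiperSockets, msg_sent counts unconfirmed messages
 * (decremented when the peer returns a window via AF_IUCV_FLAG_WIN, see
 * afiucv_hs_callback_win()) and pendings counts buffers the device still
 * holds (see afiucv_hs_callback_txnotify()).
 */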
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	__skb_header_release(skb);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
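/*
 * On-wire layout produced above (summary added for clarity):
 *
 *	[ device hard header | struct af_iucv_trans_hdr | payload ]
 *
 * A SYN frame advertises the local msglimit in the window field; a WIN or
 * plain data frame piggybacks the current msg_recv count as the window so
 * the peer can re-open its send window.  IDs and names travel in EBCDIC.
 */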
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
		fallthrough;

	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}
		fallthrough;

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		fallthrough;

	default:
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
				    gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}
static void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	char uid[sizeof(sa->siucv_user_id)];
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		sk->sk_allocation |= GFP_DMA;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
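/*
 * Bind resolution order (summary added for clarity): a user id matching
 * the local z/VM user id selects the classic VM IUCV transport; otherwise
 * the user id is matched against the permanent address of each net_device
 * to select the HiperSockets transport; if neither matches, -ENODEV.
 * A siucv_name of eight blanks requests an auto-generated name.
 */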
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);
	iucv->transport = AF_IUCV_TRANS_IUCV;
	sk->sk_allocation |= GFP_DMA;

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) +
			   LL_RESERVED_SPACE(iucv->hs_dev);
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				skb_unlink(skb, &iucv->send_skb_q);
				kfree_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err(
		"Application %s on z/VM guest %s exceeds message limit\n",
					appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
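/*
 * Ancillary-data sketch (illustration only, user-space side assumed): a
 * sender would select the IUCV target class consumed by the cmsg loop
 * above roughly like
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr mh = { .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type  = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(__u32));
 *
 * which ends up in txmsg.class and is echoed back to receivers via
 * put_cmsg() in iucv_sock_recvmsg().
 */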
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
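/*
 * Receive-window note (added for clarity): on the HiperSockets transport
 * every consumed message bumps msg_recv; once at least msglimit/2
 * confirmations have accumulated and the backlog is drained, an
 * AF_IUCV_FLAG_WIN frame returns the count to the sender, which subtracts
 * it from msg_sent (see afiucv_hs_callback_win()).
 */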
static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}
static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > U16_MAX)
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_IUCV;
	nsk->sk_allocation |= GFP_DMA;

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb;
	unsigned long flags;

	bh_lock_sock(sk);

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk(list, list_skb) {
		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
			this = list_skb;
			break;
		}
	}
	if (this)
		__skb_unlink(this, list);
	spin_unlock_irqrestore(&list->lock, flags);

	if (this) {
		kfree_skb(this);
		/* wake up any process waiting for sending */
		iucv_sock_wake_msglim(sk);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	iucv = iucv_sk(sk);
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	trans_hdr = iucv_trans_hdr(skb);
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	how should we send with no sock
	1) send without sock no send rc checking?
	2) introduce default sock to handle these cases

	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	 data -> send FIN
	 SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		fallthrough;	/* and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		fallthrough;	/* and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}
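/*
 * Flag dispatch summary for the switch above (added for clarity):
 * SYN = connect request, SYN|ACK = connect confirmed, SYN|FIN = connect
 * refused, FIN = close, WIN = window update (pure updates are header-only
 * frames and are freed here), SHT = shutdown, flags == 0 = data frame.
 */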
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from
 *				   HiperSockets transport
 */
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk_safe(list, list_skb, nskb) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
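/*
 * TX notification lifecycle (added for clarity): TX_NOTIFY_OK frees the
 * queued clone and wakes senders; TX_NOTIFY_PENDING parks the skb until a
 * TX_NOTIFY_DELAYED_* completion arrives; any unreachable/error outcome
 * drops the skb and moves a connected socket to IUCV_DISCONN.
 */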
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
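/*
 * User-space usage sketch (illustration only; field names follow struct
 * sockaddr_iucv in <net/iucv/af_iucv.h>, the peer values are
 * hypothetical):
 *
 *	struct sockaddr_iucv addr = { .siucv_family = AF_IUCV };
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memcpy(addr.siucv_user_id, "PEERUSER", 8);	// blank-padded
 *	memcpy(addr.siucv_name,    "APPNAME ", 8);	// blank-padded
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * The blank padding matches the "        " comparison in iucv_sock_bind().
 */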
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int afiucv_iucv_init(void)
{
	return pr_iucv->iucv_register(&af_iucv_handler, 0);
}

static void afiucv_iucv_exit(void)
{
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
}
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		afiucv_iucv_exit();
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		afiucv_iucv_exit();
		symbol_put(iucv_if);
	}

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);