/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
    .name       = "AF_IUCV",
    .owner      = THIS_MODULE,
    .obj_size   = sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
    {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
    DEFINE_WAIT(__wait);                                                \
    long __timeo = timeo;                                               \
    ret = 0;                                                            \
    prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);         \
    while (!(condition)) {                                              \
        if (!__timeo) {                                                 \
            ret = -EAGAIN;                                              \
            break;                                                      \
        }                                                               \
        if (signal_pending(current)) {                                  \
            ret = sock_intr_errno(__timeo);                             \
            break;                                                      \
        }                                                               \
        release_sock(sk);                                               \
        __timeo = schedule_timeout(__timeo);                            \
        lock_sock(sk);                                                  \
        ret = sock_error(sk);                                           \
        if (ret)                                                        \
            break;                                                      \
    }                                                                   \
    finish_wait(sk_sleep(sk), &__wait);                                 \
} while (0)

#define iucv_sock_wait(sk, condition, timeo)                            \
({                                                                      \
    int __ret = 0;                                                      \
    if (!(condition))                                                   \
        __iucv_sock_wait(sk, condition, timeo, __ret);                  \
    __ret;                                                              \
})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
    struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
           struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
    .lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
    .autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
    .path_pending     = iucv_callback_connreq,
    .path_complete    = iucv_callback_connack,
    .path_severed     = iucv_callback_connrej,
    .message_pending  = iucv_callback_rx,
    .message_complete = iucv_callback_txdone,
    .path_quiesced    = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
    memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
    memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
    printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
    return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
    printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
    struct iucv_sock *iucv;
    struct sock *sk;

#ifdef CONFIG_PM_DEBUG
    printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
    read_lock(&iucv_sk_list.lock);
    sk_for_each(sk, &iucv_sk_list.head) {
        iucv = iucv_sk(sk);
        switch (sk->sk_state) {
        case IUCV_DISCONN:
        case IUCV_CLOSING:
        case IUCV_CONNECTED:
            iucv_sever_path(sk, 0);
            break;
        default:
            break;
        }
        skb_queue_purge(&iucv->send_skb_q);
        skb_queue_purge(&iucv->backlog_skb_q);
    }
    read_unlock(&iucv_sk_list.lock);
    return 0;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
    struct sock *sk;

#ifdef CONFIG_PM_DEBUG
    printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
    read_lock(&iucv_sk_list.lock);
    sk_for_each(sk, &iucv_sk_list.head) {
        switch (sk->sk_state) {
        case IUCV_CONNECTED:
            sk->sk_err = EPIPE;
            sk->sk_state = IUCV_DISCONN;
            sk->sk_state_change(sk);
            break;
        default:
            break;
        }
    }
    read_unlock(&iucv_sk_list.lock);
    return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
    .prepare = afiucv_pm_prepare,
    .complete = afiucv_pm_complete,
    .freeze = afiucv_pm_freeze,
    .thaw = afiucv_pm_restore_thaw,
    .restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
    .owner = THIS_MODULE,
    .name = "afiucv",
    .bus  = NULL,
    .pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
    size_t datalen;

    if (msg->flags & IUCV_IPRMDATA) {
        datalen = 0xff - msg->rmmsg[7];
        return (datalen < 8) ? datalen : 8;
    }
    return msg->length;
}
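
/*
 * Worked example of the convention above: an IPRM message carrying five
 * bytes of socket data is sent with PRMDATA[7] = 0xff - 5 = 0xfa; on
 * receive, iucv_msg_length() recovers datalen = 0xff - 0xfa = 5.
 */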
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
    return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
    struct iucv_sock *iucv = iucv_sk(sk);

    if (sk->sk_state != IUCV_CONNECTED)
        return 1;
    if (iucv->transport == AF_IUCV_TRANS_IUCV)
        return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
    else
        return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
            (atomic_read(&iucv->pendings) <= 0));
}
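
/*
 * Flow control thus differs per transport: classic IUCV bounds the local
 * send queue by the path's message limit, while the HiperSockets
 * transport counts unconfirmed messages against the peer's advertised
 * window and additionally waits until no TX notifications are pending.
 */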
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
    struct socket_wq *wq;

    rcu_read_lock();
    wq = rcu_dereference(sk->sk_wq);
    if (skwq_has_sleeper(wq))
        wake_up_interruptible_all(&wq->wait);
    sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
    rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
           struct sk_buff *skb, u8 flags)
{
    struct iucv_sock *iucv = iucv_sk(sock);
    struct af_iucv_trans_hdr *phs_hdr;
    struct sk_buff *nskb;
    int err, confirm_recv = 0;

    memset(skb->head, 0, ETH_HLEN);
    phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
                    sizeof(struct af_iucv_trans_hdr));
    skb_reset_mac_header(skb);
    skb_reset_network_header(skb);
    skb_push(skb, ETH_HLEN);
    skb_reset_mac_header(skb);
    memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

    phs_hdr->magic = ETH_P_AF_IUCV;
    phs_hdr->version = 1;
    phs_hdr->flags = flags;
    if (flags == AF_IUCV_FLAG_SYN)
        phs_hdr->window = iucv->msglimit;
    else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
        confirm_recv = atomic_read(&iucv->msg_recv);
        phs_hdr->window = confirm_recv;
        if (confirm_recv)
            phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
    }
    memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
    memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
    memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
    memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
    ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
    ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
    ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
    ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
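
    /*
     * The addressing fields travel in EBCDIC on the wire, matching what
     * the z/VM IUCV transport expects, so they are converted from ASCII
     * here and converted back by EBCASC() on the receive path.
     */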
    if (imsg)
        memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

    skb->dev = iucv->hs_dev;
    if (!skb->dev)
        return -ENODEV;
    if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
        return -ENETDOWN;
    if (skb->len > skb->dev->mtu) {
        if (sock->sk_type == SOCK_SEQPACKET)
            return -EMSGSIZE;
        else
            skb_trim(skb, skb->dev->mtu);
    }
    skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
    nskb = skb_clone(skb, GFP_ATOMIC);
    if (!nskb)
        return -ENOMEM;

    skb_queue_tail(&iucv->send_skb_q, nskb);
    err = dev_queue_xmit(skb);
    if (net_xmit_eval(err)) {
        skb_unlink(nskb, &iucv->send_skb_q);
        kfree_skb(nskb);
    } else {
        atomic_sub(confirm_recv, &iucv->msg_recv);
        WARN_ON(atomic_read(&iucv->msg_recv) < 0);
    }
    return net_xmit_eval(err);
}
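
/*
 * Note on the clone above: the original skb is handed to
 * dev_queue_xmit() and may be consumed by the driver, so a clone stays
 * on send_skb_q to track the in-flight message until
 * afiucv_hs_callback_txnotify() reports its fate.
 */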
static struct sock *__iucv_get_sock_by_name(char *nm)
{
    struct sock *sk;

    sk_for_each(sk, &iucv_sk_list.head)
        if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
            return sk;

    return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
    skb_queue_purge(&sk->sk_receive_queue);
    skb_queue_purge(&sk->sk_error_queue);

    sk_mem_reclaim(sk);

    if (!sock_flag(sk, SOCK_DEAD)) {
        pr_err("Attempt to release alive iucv socket %p\n", sk);
        return;
    }

    WARN_ON(atomic_read(&sk->sk_rmem_alloc));
    WARN_ON(atomic_read(&sk->sk_wmem_alloc));
    WARN_ON(sk->sk_wmem_queued);
    WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
    struct sock *sk;

    /* Close non-accepted connections */
    while ((sk = iucv_accept_dequeue(parent, NULL))) {
        iucv_sock_close(sk);
        iucv_sock_kill(sk);
    }

    parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
    if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
        return;

    iucv_sock_unlink(&iucv_sk_list, sk);
    sock_set_flag(sk, SOCK_DEAD);
    sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
    unsigned char user_data[16];
    struct iucv_sock *iucv = iucv_sk(sk);
    struct iucv_path *path = iucv->path;

    if (iucv->path) {
        iucv->path = NULL;
        if (with_user_data) {
            low_nmcpy(user_data, iucv->src_name);
            high_nmcpy(user_data, iucv->dst_name);
            ASCEBC(user_data, sizeof(user_data));
            pr_iucv->path_sever(path, user_data);
        } else
            pr_iucv->path_sever(path, NULL);
        iucv_path_free(path);
    }
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
    int err = 0;
    int blen;
    struct sk_buff *skb;
    u8 shutdown = 0;

    blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
    if (sk->sk_shutdown & SEND_SHUTDOWN) {
        /* controlling flags should be sent anyway */
        shutdown = sk->sk_shutdown;
        sk->sk_shutdown &= RCV_SHUTDOWN;
    }
    skb = sock_alloc_send_skb(sk, blen, 1, &err);
    if (skb) {
        skb_reserve(skb, blen);
        err = afiucv_hs_send(NULL, sk, skb, flags);
    }
    if (shutdown)
        sk->sk_shutdown = shutdown;
    return err;
}
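
/*
 * SEND_SHUTDOWN is masked off around the allocation/send above so that
 * control frames (SYN/FIN/WIN/SHT) can still be emitted on a socket
 * whose send direction was already shut down; the original shutdown
 * state is restored afterwards.
 */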
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
    struct iucv_sock *iucv = iucv_sk(sk);
    unsigned long timeo;
    int err = 0;

    lock_sock(sk);

    switch (sk->sk_state) {
    case IUCV_LISTEN:
        iucv_sock_cleanup_listen(sk);
        break;

    case IUCV_CONNECTED:
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
            err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
            sk->sk_state = IUCV_DISCONN;
            sk->sk_state_change(sk);
        }
    case IUCV_DISCONN:	/* fall through */
        sk->sk_state = IUCV_CLOSING;
        sk->sk_state_change(sk);

        if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
            if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                timeo = sk->sk_lingertime;
            else
                timeo = IUCV_DISCONN_TIMEOUT;
            iucv_sock_wait(sk,
                    iucv_sock_in_state(sk, IUCV_CLOSED, 0),
                    timeo);
        }

    case IUCV_CLOSING:	/* fall through */
        sk->sk_state = IUCV_CLOSED;
        sk->sk_state_change(sk);

        sk->sk_err = ECONNRESET;
        sk->sk_state_change(sk);

        skb_queue_purge(&iucv->send_skb_q);
        skb_queue_purge(&iucv->backlog_skb_q);

    default:	/* fall through */
        iucv_sever_path(sk, 1);
    }

    if (iucv->hs_dev) {
        dev_put(iucv->hs_dev);
        iucv->hs_dev = NULL;
        sk->sk_bound_dev_if = 0;
    }

    /* mark socket for deletion by iucv_sock_kill() */
    sock_set_flag(sk, SOCK_ZAPPED);

    release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
    if (parent) {
        sk->sk_type = parent->sk_type;
        security_sk_clone(parent, sk);
    }
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
                    gfp_t prio, int kern)
{
    struct sock *sk;
    struct iucv_sock *iucv;

    sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
    if (!sk)
        return NULL;
    iucv = iucv_sk(sk);

    sock_init_data(sock, sk);
    INIT_LIST_HEAD(&iucv->accept_q);
    spin_lock_init(&iucv->accept_q_lock);
    skb_queue_head_init(&iucv->send_skb_q);
    INIT_LIST_HEAD(&iucv->message_q.list);
    spin_lock_init(&iucv->message_q.lock);
    skb_queue_head_init(&iucv->backlog_skb_q);
    iucv->send_tag = 0;
    atomic_set(&iucv->pendings, 0);
    iucv->flags = 0;
    iucv->msglimit = 0;
    atomic_set(&iucv->msg_sent, 0);
    atomic_set(&iucv->msg_recv, 0);
    iucv->path = NULL;
    iucv->sk_txnotify = afiucv_hs_callback_txnotify;
    memset(&iucv->src_user_id, 0, 32);
    if (pr_iucv)
        iucv->transport = AF_IUCV_TRANS_IUCV;
    else
        iucv->transport = AF_IUCV_TRANS_HIPER;

    sk->sk_destruct = iucv_sock_destruct;
    sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
    sk->sk_allocation = GFP_DMA;

    sock_reset_flag(sk, SOCK_ZAPPED);

    sk->sk_protocol = proto;
    sk->sk_state = IUCV_OPEN;

    iucv_sock_link(&iucv_sk_list, sk);
    return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                int kern)
{
    struct sock *sk;

    if (protocol && protocol != PF_IUCV)
        return -EPROTONOSUPPORT;

    sock->state = SS_UNCONNECTED;

    switch (sock->type) {
    case SOCK_STREAM:
        sock->ops = &iucv_sock_ops;
        break;
    case SOCK_SEQPACKET:
        /* currently, proto ops can handle both sk types */
        sock->ops = &iucv_sock_ops;
        break;
    default:
        return -ESOCKTNOSUPPORT;
    }

    sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
    if (!sk)
        return -ENOMEM;

    iucv_sock_init(sk, NULL);

    return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
    write_lock_bh(&l->lock);
    sk_add_node(sk, &l->head);
    write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
    write_lock_bh(&l->lock);
    sk_del_node_init(sk);
    write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
    unsigned long flags;
    struct iucv_sock *par = iucv_sk(parent);

    sock_hold(sk);
    spin_lock_irqsave(&par->accept_q_lock, flags);
    list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
    spin_unlock_irqrestore(&par->accept_q_lock, flags);
    iucv_sk(sk)->parent = parent;
    sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
    unsigned long flags;
    struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

    spin_lock_irqsave(&par->accept_q_lock, flags);
    list_del_init(&iucv_sk(sk)->accept_q);
    spin_unlock_irqrestore(&par->accept_q_lock, flags);
    sk_acceptq_removed(iucv_sk(sk)->parent);
    iucv_sk(sk)->parent = NULL;
    sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
    struct iucv_sock *isk, *n;
    struct sock *sk;

    list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
        sk = (struct sock *) isk;
        lock_sock(sk);

        if (sk->sk_state == IUCV_CLOSED) {
            iucv_accept_unlink(sk);
            release_sock(sk);
            continue;
        }

        if (sk->sk_state == IUCV_CONNECTED ||
            sk->sk_state == IUCV_DISCONN ||
            !newsock) {
            iucv_accept_unlink(sk);
            if (newsock)
                sock_graft(sk, newsock);

            release_sock(sk);
            return sk;
        }

        release_sock(sk);
    }
    return NULL;
}
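
/*
 * Autobinding below derives an eight-character hexadecimal source name
 * from a global counter and retries until the generated name is not yet
 * taken by another AF_IUCV socket.
 */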
static void __iucv_auto_name(struct iucv_sock *iucv)
{
    char name[12];

    sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
    while (__iucv_get_sock_by_name(name)) {
        sprintf(name, "%08x",
            atomic_inc_return(&iucv_sk_list.autobind_name));
    }
    memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
              int addr_len)
{
    struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv;
    int err = 0;
    struct net_device *dev;
    char uid[9];

    /* Verify the input sockaddr */
    if (!addr || addr->sa_family != AF_IUCV)
        return -EINVAL;

    if (addr_len < sizeof(struct sockaddr_iucv))
        return -EINVAL;

    lock_sock(sk);
    if (sk->sk_state != IUCV_OPEN) {
        err = -EBADFD;
        goto done;
    }

    write_lock_bh(&iucv_sk_list.lock);

    iucv = iucv_sk(sk);
    if (__iucv_get_sock_by_name(sa->siucv_name)) {
        err = -EADDRINUSE;
        goto done_unlock;
    }
    if (iucv->path)
        goto done_unlock;

    /* Bind the socket */
    if (pr_iucv)
        if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
            goto vm_bind; /* VM IUCV transport */

    /* try hiper transport */
    memcpy(uid, sa->siucv_user_id, sizeof(uid));
    ASCEBC(uid, 8);
    rcu_read_lock();
    for_each_netdev_rcu(&init_net, dev) {
        if (!memcmp(dev->perm_addr, uid, 8)) {
            memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
            /* Check for uninitialized siucv_name */
            if (strncmp(sa->siucv_name, "        ", 8) == 0)
                __iucv_auto_name(iucv);
            else
                memcpy(iucv->src_name, sa->siucv_name, 8);
            sk->sk_bound_dev_if = dev->ifindex;
            iucv->hs_dev = dev;
            dev_hold(dev);
            sk->sk_state = IUCV_BOUND;
            iucv->transport = AF_IUCV_TRANS_HIPER;
            if (!iucv->msglimit)
                iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
            rcu_read_unlock();
            goto done_unlock;
        }
    }
    rcu_read_unlock();
vm_bind:
    if (pr_iucv) {
        /* use local userid for backward compat */
        memcpy(iucv->src_name, sa->siucv_name, 8);
        memcpy(iucv->src_user_id, iucv_userid, 8);
        sk->sk_state = IUCV_BOUND;
        iucv->transport = AF_IUCV_TRANS_IUCV;
        if (!iucv->msglimit)
            iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
        goto done_unlock;
    }
    /* found no dev to bind */
    err = -ENODEV;
done_unlock:
    /* Release the socket list lock */
    write_unlock_bh(&iucv_sk_list.lock);
done:
    release_sock(sk);
    return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
    struct iucv_sock *iucv = iucv_sk(sk);
    int err = 0;

    if (unlikely(!pr_iucv))
        return -EPROTO;

    memcpy(iucv->src_user_id, iucv_userid, 8);

    write_lock_bh(&iucv_sk_list.lock);
    __iucv_auto_name(iucv);
    write_unlock_bh(&iucv_sk_list.lock);

    if (!iucv->msglimit)
        iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

    return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
    struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    unsigned char user_data[16];
    int err;

    high_nmcpy(user_data, sa->siucv_name);
    low_nmcpy(user_data, iucv->src_name);
    ASCEBC(user_data, sizeof(user_data));

    /* Create path. */
    iucv->path = iucv_path_alloc(iucv->msglimit,
                     IUCV_IPRMDATA, GFP_KERNEL);
    if (!iucv->path) {
        err = -ENOMEM;
        goto done;
    }
    err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
                    sa->siucv_user_id, NULL, user_data,
                    sk);
    if (err) {
        iucv_path_free(iucv->path);
        iucv->path = NULL;
        switch (err) {
        case 0x0b:	/* Target communicator is not logged on */
            err = -ENETUNREACH;
            break;
        case 0x0d:	/* Max connections for this guest exceeded */
        case 0x0e:	/* Max connections for target guest exceeded */
            err = -EAGAIN;
            break;
        case 0x0f:	/* Missing IUCV authorization */
            err = -EACCES;
            break;
        default:
            err = -ECONNREFUSED;
            break;
        }
    }
done:
    return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
                 int alen, int flags)
{
    struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    int err;

    if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
        return -EINVAL;

    if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
        return -EBADFD;

    if (sk->sk_state == IUCV_OPEN &&
        iucv->transport == AF_IUCV_TRANS_HIPER)
        return -EBADFD; /* explicit bind required */

    if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
        return -EINVAL;

    if (sk->sk_state == IUCV_OPEN) {
        err = iucv_sock_autobind(sk);
        if (unlikely(err))
            return err;
    }

    lock_sock(sk);

    /* Set the destination information */
    memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
    memcpy(iucv->dst_name, sa->siucv_name, 8);

    if (iucv->transport == AF_IUCV_TRANS_HIPER)
        err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
    else
        err = afiucv_path_connect(sock, addr);
    if (err)
        goto done;

    if (sk->sk_state != IUCV_CONNECTED)
        err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
                                IUCV_DISCONN),
                     sock_sndtimeo(sk, flags & O_NONBLOCK));

    if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
        err = -ECONNREFUSED;

    if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
        iucv_sever_path(sk, 0);

done:
    release_sock(sk);
    return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
    struct sock *sk = sock->sk;
    int err;

    lock_sock(sk);

    err = -EINVAL;
    if (sk->sk_state != IUCV_BOUND)
        goto done;

    if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
        goto done;

    sk->sk_max_ack_backlog = backlog;
    sk->sk_ack_backlog = 0;
    sk->sk_state = IUCV_LISTEN;
    err = 0;

done:
    release_sock(sk);
    return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
                int flags, bool kern)
{
    DECLARE_WAITQUEUE(wait, current);
    struct sock *sk = sock->sk, *nsk;
    long timeo;
    int err = 0;

    lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

    if (sk->sk_state != IUCV_LISTEN) {
        err = -EBADFD;
        goto done;
    }

    timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

    /* Wait for an incoming connection */
    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (!timeo) {
            err = -EAGAIN;
            break;
        }

        release_sock(sk);
        timeo = schedule_timeout(timeo);
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

        if (sk->sk_state != IUCV_LISTEN) {
            err = -EBADFD;
            break;
        }

        if (signal_pending(current)) {
            err = sock_intr_errno(timeo);
            break;
        }
    }

    set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);

    if (err)
        goto done;

    newsock->state = SS_CONNECTED;

done:
    release_sock(sk);
    return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
                 int *len, int peer)
{
    struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);

    addr->sa_family = AF_IUCV;
    *len = sizeof(struct sockaddr_iucv);

    if (peer) {
        memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
        memcpy(siucv->siucv_name, iucv->dst_name, 8);
    } else {
        memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
        memcpy(siucv->siucv_name, iucv->src_name, 8);
    }
    memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
    memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
    memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

    return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
              struct sk_buff *skb)
{
    u8 prmdata[8];

    memcpy(prmdata, (void *) skb->data, skb->len);
    prmdata[7] = 0xff - (u8) skb->len;
    return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                 (void *) prmdata, 8);
}
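
/*
 * Example: sending the three bytes "abc" in the parameter list yields
 * prmdata[0..2] = 'a','b','c', prmdata[3..6] unused, and
 * prmdata[7] = 0xff - 3 = 0xfc, matching the convention documented at
 * iucv_msg_length().
 */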
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                 size_t len)
{
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    size_t headroom = 0;
    size_t linear;
    struct sk_buff *skb;
    struct iucv_message txmsg = {0};
    struct cmsghdr *cmsg;
    int cmsg_done;
    long timeo;
    char user_id[9];
    char appl_id[9];
    int err;
    int noblock = msg->msg_flags & MSG_DONTWAIT;

    err = sock_error(sk);
    if (err)
        return err;

    if (msg->msg_flags & MSG_OOB)
        return -EOPNOTSUPP;

    /* SOCK_SEQPACKET: we do not support segmented records */
    if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
        return -EOPNOTSUPP;

    lock_sock(sk);

    if (sk->sk_shutdown & SEND_SHUTDOWN) {
        err = -EPIPE;
        goto out;
    }

    /* Return if the socket is not in connected state */
    if (sk->sk_state != IUCV_CONNECTED) {
        err = -ENOTCONN;
        goto out;
    }

    /* initialize defaults */
    cmsg_done = 0;	/* check for duplicate headers */

    /* iterate over control messages */
    for_each_cmsghdr(cmsg, msg) {
        if (!CMSG_OK(msg, cmsg)) {
            err = -EINVAL;
            goto out;
        }

        if (cmsg->cmsg_level != SOL_IUCV)
            continue;

        if (cmsg->cmsg_type & cmsg_done) {
            err = -EINVAL;
            goto out;
        }
        cmsg_done |= cmsg->cmsg_type;

        switch (cmsg->cmsg_type) {
        case SCM_IUCV_TRGCLS:
            if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
                err = -EINVAL;
                goto out;
            }

            /* set iucv message target class */
            memcpy(&txmsg.class,
                   (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

            break;

        default:
            err = -EINVAL;
            goto out;
        }
    }

    /* allocate one skb for each iucv message:
     * this is fine for SOCK_SEQPACKET (unless we want to support
     * segmented records using the MSG_EOR flag), but
     * for SOCK_STREAM we might want to improve it in future */
    if (iucv->transport == AF_IUCV_TRANS_HIPER) {
        headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
        linear = len;
    } else {
        if (len < PAGE_SIZE) {
            linear = len;
        } else {
            /* In nonlinear "classic" iucv skb,
             * reserve space for iucv_array
             */
            headroom = sizeof(struct iucv_array) *
                   (MAX_SKB_FRAGS + 1);
            linear = PAGE_SIZE - headroom;
        }
    }
    skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                   noblock, &err, 0);
    if (!skb)
        goto out;
    if (headroom)
        skb_reserve(skb, headroom);
    skb_put(skb, linear);
    skb->len = len;
    skb->data_len = len - linear;
    err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
    if (err)
        goto fail;

    /* wait if the number of outstanding messages for the iucv path
     * has reached the message limit
     */
    timeo = sock_sndtimeo(sk, noblock);
    err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
    if (err)
        goto fail;

    /* return -ECONNRESET if the socket is no longer connected */
    if (sk->sk_state != IUCV_CONNECTED) {
        err = -ECONNRESET;
        goto fail;
    }

    /* increment and save iucv message tag for msg_completion cbk */
    txmsg.tag = iucv->send_tag++;
    IUCV_SKB_CB(skb)->tag = txmsg.tag;

    if (iucv->transport == AF_IUCV_TRANS_HIPER) {
        atomic_inc(&iucv->msg_sent);
        err = afiucv_hs_send(&txmsg, sk, skb, 0);
        if (err) {
            atomic_dec(&iucv->msg_sent);
            goto fail;
        }
    } else { /* Classic VM IUCV transport */
        skb_queue_tail(&iucv->send_skb_q, skb);

        if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
            skb->len <= 7) {
            err = iucv_send_iprm(iucv->path, &txmsg, skb);

            /* on success: there is no message_complete callback */
            /* for an IPRMDATA msg; remove skb from send queue */
            if (err == 0) {
                skb_unlink(skb, &iucv->send_skb_q);
                kfree_skb(skb);
            }

            /* this error should never happen since the */
            /* IUCV_IPRMDATA path flag is set... sever path */
            if (err == 0x15) {
                pr_iucv->path_sever(iucv->path, NULL);
                skb_unlink(skb, &iucv->send_skb_q);
                err = -EPIPE;
                goto fail;
            }
        } else if (skb_is_nonlinear(skb)) {
            struct iucv_array *iba = (struct iucv_array *)skb->head;
            int i;

            /* skip iucv_array lying in the headroom */
            iba[0].address = (u32)(addr_t)skb->data;
            iba[0].length = (u32)skb_headlen(skb);
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                iba[i + 1].address =
                    (u32)(addr_t)skb_frag_address(frag);
                iba[i + 1].length = (u32)skb_frag_size(frag);
            }
            err = pr_iucv->message_send(iucv->path, &txmsg,
                            IUCV_IPBUFLST, 0,
                            (void *)iba, skb->len);
        } else { /* non-IPRM Linear skb */
            err = pr_iucv->message_send(iucv->path, &txmsg,
                    0, 0, (void *)skb->data, skb->len);
        }
        if (err) {
            if (err == 3) {
                user_id[8] = 0;
                memcpy(user_id, iucv->dst_user_id, 8);
                appl_id[8] = 0;
                memcpy(appl_id, iucv->dst_name, 8);
                pr_err(
        "Application %s on z/VM guest %s exceeds message limit\n",
                    appl_id, user_id);
                err = -EAGAIN;
            } else {
                err = -EPIPE;
            }
            skb_unlink(skb, &iucv->send_skb_q);
            goto fail;
        }
    }

    release_sock(sk);
    return len;

fail:
    kfree_skb(skb);
out:
    release_sock(sk);
    return err;
}
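
/*
 * The nonlinear branch above builds an IUCV_IPBUFLST scatter list in the
 * skb headroom: entry 0 covers the linear part and entries 1..n the page
 * frags, so large messages can be handed to IUCV without first copying
 * them into one contiguous buffer.
 */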
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
    size_t headroom, linear;
    struct sk_buff *skb;
    int err;

    if (len < PAGE_SIZE) {
        headroom = 0;
        linear = len;
    } else {
        headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
        linear = PAGE_SIZE - headroom;
    }
    skb = alloc_skb_with_frags(headroom + linear, len - linear,
                   0, &err, GFP_ATOMIC | GFP_DMA);
    WARN_ONCE(!skb,
          "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
          len, err);
    if (skb) {
        if (headroom)
            skb_reserve(skb, headroom);
        skb_put(skb, linear);
        skb->len = len;
        skb->data_len = len - linear;
    }
    return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
                 struct iucv_path *path,
                 struct iucv_message *msg)
{
    int rc;
    unsigned int len;

    len = iucv_msg_length(msg);

    /* store msg target class in the second 4 bytes of skb ctrl buffer */
    /* Note: the first 4 bytes are reserved for msg tag */
    IUCV_SKB_CB(skb)->class = msg->class;

    /* check for special IPRM messages (e.g. iucv_sock_shutdown) */
    if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
        if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
            skb->data = NULL;
            skb->len = 0;
        }
    } else {
        if (skb_is_nonlinear(skb)) {
            struct iucv_array *iba = (struct iucv_array *)skb->head;
            int i;

            iba[0].address = (u32)(addr_t)skb->data;
            iba[0].length = (u32)skb_headlen(skb);
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                iba[i + 1].address =
                    (u32)(addr_t)skb_frag_address(frag);
                iba[i + 1].length = (u32)skb_frag_size(frag);
            }
            rc = pr_iucv->message_receive(path, msg,
                          IUCV_IPBUFLST,
                          (void *)iba, len, NULL);
        } else {
            rc = pr_iucv->message_receive(path, msg,
                          msg->flags & IUCV_IPRMDATA,
                          skb->data, len, NULL);
        }
        if (rc) {
            kfree_skb(skb);
            return;
        }
        WARN_ON_ONCE(skb->len != len);
    }

    IUCV_SKB_CB(skb)->offset = 0;
    if (sk_filter(sk, skb)) {
        atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
        kfree_skb(skb);
        return;
    }
    if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
    struct iucv_sock *iucv = iucv_sk(sk);
    struct sk_buff *skb;
    struct sock_msg_q *p, *n;

    list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
        skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
        if (!skb)
            break;
        iucv_process_message(sk, skb, p->path, &p->msg);
        list_del(&p->list);
        kfree(p);
        if (!skb_queue_empty(&iucv->backlog_skb_q))
            break;
    }
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
                 size_t len, int flags)
{
    int noblock = flags & MSG_DONTWAIT;
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    unsigned int copied, rlen;
    struct sk_buff *skb, *rskb, *cskb;
    int err = 0;
    u32 offset;

    if ((sk->sk_state == IUCV_DISCONN) &&
        skb_queue_empty(&iucv->backlog_skb_q) &&
        skb_queue_empty(&sk->sk_receive_queue) &&
        list_empty(&iucv->message_q.list))
        return 0;

    if (flags & (MSG_OOB))
        return -EOPNOTSUPP;

    /* receive/dequeue next skb:
     * the function understands MSG_PEEK and, thus, does not dequeue skb */
    skb = skb_recv_datagram(sk, flags, noblock, &err);
    if (!skb) {
        if (sk->sk_shutdown & RCV_SHUTDOWN)
            return 0;
        return err;
    }

    offset = IUCV_SKB_CB(skb)->offset;
    rlen   = skb->len - offset;		/* real length of skb */
    copied = min_t(unsigned int, rlen, len);
    if (!rlen)
        sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

    cskb = skb;
    if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
        if (!(flags & MSG_PEEK))
            skb_queue_head(&sk->sk_receive_queue, skb);
        return -EFAULT;
    }

    /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
    if (sk->sk_type == SOCK_SEQPACKET) {
        if (copied < rlen)
            msg->msg_flags |= MSG_TRUNC;
        /* each iucv message contains a complete record */
        msg->msg_flags |= MSG_EOR;
    }

    /* create control message to store iucv msg target class:
     * get the trgcls from the control buffer of the skb due to
     * fragmentation of original iucv message. */
    err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
               sizeof(IUCV_SKB_CB(skb)->class),
               (void *)&IUCV_SKB_CB(skb)->class);
    if (err) {
        if (!(flags & MSG_PEEK))
            skb_queue_head(&sk->sk_receive_queue, skb);
        return err;
    }

    /* Mark read part of skb as used */
    if (!(flags & MSG_PEEK)) {

        /* SOCK_STREAM: re-queue skb if it contains unreceived data */
        if (sk->sk_type == SOCK_STREAM) {
            if (copied < rlen) {
                IUCV_SKB_CB(skb)->offset = offset + copied;
                skb_queue_head(&sk->sk_receive_queue, skb);
                goto done;
            }
        }

        kfree_skb(skb);
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
            atomic_inc(&iucv->msg_recv);
            if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
                WARN_ON(1);
                iucv_sock_close(sk);
                return -EFAULT;
            }
        }

        /* Queue backlog skbs */
        spin_lock_bh(&iucv->message_q.lock);
        rskb = skb_dequeue(&iucv->backlog_skb_q);
        while (rskb) {
            IUCV_SKB_CB(rskb)->offset = 0;
            if (__sock_queue_rcv_skb(sk, rskb)) {
                /* handle rcv queue full */
                skb_queue_head(&iucv->backlog_skb_q,
                        rskb);
                break;
            }
            rskb = skb_dequeue(&iucv->backlog_skb_q);
        }
        if (skb_queue_empty(&iucv->backlog_skb_q)) {
            if (!list_empty(&iucv->message_q.list))
                iucv_process_message_q(sk);
            if (atomic_read(&iucv->msg_recv) >=
                            iucv->msglimit / 2) {
                err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
                if (err) {
                    sk->sk_state = IUCV_DISCONN;
                    sk->sk_state_change(sk);
                }
            }
        }
        spin_unlock_bh(&iucv->message_q.lock);
    }

done:
    /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
    if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
        copied = rlen;

    return copied;
}
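
/*
 * On the HiperSockets transport, once at least half of the advertised
 * receive window has been consumed, the receive path above sends a WIN
 * control frame to confirm the received messages so the peer's
 * msg_sent counter is decremented and it may continue sending.
 */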
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
    struct iucv_sock *isk, *n;
    struct sock *sk;

    list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
        sk = (struct sock *) isk;

        if (sk->sk_state == IUCV_CONNECTED)
            return POLLIN | POLLRDNORM;
    }

    return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
                poll_table *wait)
{
    struct sock *sk = sock->sk;
    unsigned int mask = 0;

    sock_poll_wait(file, sk_sleep(sk), wait);

    if (sk->sk_state == IUCV_LISTEN)
        return iucv_accept_poll(sk);

    if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
        mask |= POLLERR |
            (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

    if (sk->sk_shutdown & RCV_SHUTDOWN)
        mask |= POLLRDHUP;

    if (sk->sk_shutdown == SHUTDOWN_MASK)
        mask |= POLLHUP;

    if (!skb_queue_empty(&sk->sk_receive_queue) ||
        (sk->sk_shutdown & RCV_SHUTDOWN))
        mask |= POLLIN | POLLRDNORM;

    if (sk->sk_state == IUCV_CLOSED)
        mask |= POLLHUP;

    if (sk->sk_state == IUCV_DISCONN)
        mask |= POLLIN;

    if (sock_writeable(sk) && iucv_below_msglim(sk))
        mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
    else
        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

    return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    struct iucv_message txmsg;
    int err = 0;

    how++;

    if ((how & ~SHUTDOWN_MASK) || !how)
        return -EINVAL;

    lock_sock(sk);
    switch (sk->sk_state) {
    case IUCV_LISTEN:
    case IUCV_DISCONN:
    case IUCV_CLOSING:
    case IUCV_CLOSED:
        err = -ENOTCONN;
        goto fail;
    default:
        break;
    }

    if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
        if (iucv->transport == AF_IUCV_TRANS_IUCV) {
            txmsg.class = 0;
            txmsg.tag = 0;
            err = pr_iucv->message_send(iucv->path, &txmsg,
                IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
            if (err) {
                switch (err) {
                case 1:
                    err = -ENOTCONN;
                    break;
                case 2:
                    err = -ECONNRESET;
                    break;
                default:
                    err = -ENOTCONN;
                    break;
                }
            }
        } else
            iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
    }

    sk->sk_shutdown |= how;
    if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
        if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
            iucv->path) {
            err = pr_iucv->path_quiesce(iucv->path, NULL);
            if (err)
                err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
        }
        skb_queue_purge(&sk->sk_receive_queue);
    }

    /* Wake up anyone sleeping in poll */
    sk->sk_state_change(sk);

fail:
    release_sock(sk);
    return err;
}
static int iucv_sock_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    int err = 0;

    if (!sk)
        return 0;

    iucv_sock_close(sk);

    sock_orphan(sk);
    iucv_sock_kill(sk);
    return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
                char __user *optval, unsigned int optlen)
{
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    int val;
    int rc;

    if (level != SOL_IUCV)
        return -ENOPROTOOPT;

    if (optlen < sizeof(int))
        return -EINVAL;

    if (get_user(val, (int __user *) optval))
        return -EFAULT;

    rc = 0;

    lock_sock(sk);
    switch (optname) {
    case SO_IPRMDATA_MSG:
        if (val)
            iucv->flags |= IUCV_IPRMDATA;
        else
            iucv->flags &= ~IUCV_IPRMDATA;
        break;
    case SO_MSGLIMIT:
        switch (sk->sk_state) {
        case IUCV_OPEN:
        case IUCV_BOUND:
            if (val < 1 || val > (u16)(~0))
                rc = -EINVAL;
            else
                iucv->msglimit = val;
            break;
        default:
            rc = -EINVAL;
            break;
        }
        break;
    default:
        rc = -ENOPROTOOPT;
        break;
    }
    release_sock(sk);

    return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
                char __user *optval, int __user *optlen)
{
    struct sock *sk = sock->sk;
    struct iucv_sock *iucv = iucv_sk(sk);
    unsigned int val;
    int len;

    if (level != SOL_IUCV)
        return -ENOPROTOOPT;

    if (get_user(len, optlen))
        return -EFAULT;

    if (len < 0)
        return -EINVAL;

    len = min_t(unsigned int, len, sizeof(int));

    switch (optname) {
    case SO_IPRMDATA_MSG:
        val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
        break;
    case SO_MSGLIMIT:
        lock_sock(sk);
        val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
                       : iucv->msglimit;	/* default */
        release_sock(sk);
        break;
    case SO_MSGSIZE:
        if (sk->sk_state == IUCV_OPEN)
            return -EBADFD;
        val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
                sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
                0x7fffffff;
        break;
    default:
        return -ENOPROTOOPT;
    }

    if (put_user(len, optlen))
        return -EFAULT;
    if (copy_to_user(optval, &val, len))
        return -EFAULT;

    return 0;
}
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
                 u8 ipvmid[8], u8 ipuser[16])
{
    unsigned char user_data[16];
    unsigned char nuser_data[16];
    unsigned char src_name[8];
    struct sock *sk, *nsk;
    struct iucv_sock *iucv, *niucv;
    int err;

    memcpy(src_name, ipuser, 8);
    EBCASC(src_name, 8);
    /* Find out if this path belongs to af_iucv. */
    read_lock(&iucv_sk_list.lock);
    iucv = NULL;
    sk = NULL;
    sk_for_each(sk, &iucv_sk_list.head)
        if (sk->sk_state == IUCV_LISTEN &&
            !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
            /*
             * Found a listening socket with
             * src_name == ipuser[0-7].
             */
            iucv = iucv_sk(sk);
            break;
        }
    read_unlock(&iucv_sk_list.lock);
    if (!iucv)
        /* No socket found, not one of our paths. */
        return -EINVAL;

    bh_lock_sock(sk);

    /* Check if parent socket is listening */
    low_nmcpy(user_data, iucv->src_name);
    high_nmcpy(user_data, iucv->dst_name);
    ASCEBC(user_data, sizeof(user_data));
    if (sk->sk_state != IUCV_LISTEN) {
        err = pr_iucv->path_sever(path, user_data);
        iucv_path_free(path);
        goto fail;
    }

    /* Check for backlog size */
    if (sk_acceptq_is_full(sk)) {
        err = pr_iucv->path_sever(path, user_data);
        iucv_path_free(path);
        goto fail;
    }

    /* Create the new socket */
    nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
    if (!nsk) {
        err = pr_iucv->path_sever(path, user_data);
        iucv_path_free(path);
        goto fail;
    }

    niucv = iucv_sk(nsk);
    iucv_sock_init(nsk, sk);

    /* Set the new iucv_sock */
    memcpy(niucv->dst_name, ipuser + 8, 8);
    EBCASC(niucv->dst_name, 8);
    memcpy(niucv->dst_user_id, ipvmid, 8);
    memcpy(niucv->src_name, iucv->src_name, 8);
    memcpy(niucv->src_user_id, iucv->src_user_id, 8);
    niucv->path = path;

    /* Call iucv_accept */
    high_nmcpy(nuser_data, ipuser + 8);
    memcpy(nuser_data + 8, niucv->src_name, 8);
    ASCEBC(nuser_data + 8, 8);

    /* set message limit for path based on msglimit of accepting socket */
    niucv->msglimit = iucv->msglimit;
    path->msglim = iucv->msglimit;
    err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
    if (err) {
        iucv_sever_path(nsk, 1);
        iucv_sock_kill(nsk);
        goto fail;
    }

    iucv_accept_enqueue(sk, nsk);

    /* Wake up accept */
    nsk->sk_state = IUCV_CONNECTED;
    sk->sk_data_ready(sk);
    err = 0;
fail:
    bh_unlock_sock(sk);
    return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
    struct sock *sk = path->private;

    sk->sk_state = IUCV_CONNECTED;
    sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
    struct sock *sk = path->private;
    struct iucv_sock *iucv = iucv_sk(sk);
    struct sk_buff *skb;
    struct sock_msg_q *save_msg;
    int len;

    if (sk->sk_shutdown & RCV_SHUTDOWN) {
        pr_iucv->message_reject(path, msg);
        return;
    }

    spin_lock(&iucv->message_q.lock);

    if (!list_empty(&iucv->message_q.list) ||
        !skb_queue_empty(&iucv->backlog_skb_q))
        goto save_message;

    len = atomic_read(&sk->sk_rmem_alloc);
    len += SKB_TRUESIZE(iucv_msg_length(msg));
    if (len > sk->sk_rcvbuf)
        goto save_message;

    skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
    if (!skb)
        goto save_message;

    iucv_process_message(sk, skb, path, msg);
    goto out_unlock;

save_message:
    save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
    if (!save_msg)
        goto out_unlock;
    save_msg->path = path;
    save_msg->msg = *msg;

    list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
    spin_unlock(&iucv->message_q.lock);
}
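
/*
 * Messages that cannot be delivered immediately (backlog pending or
 * receive buffer full) stay outstanding on the IUCV path: only the
 * message descriptor is saved on message_q, and the data is fetched
 * later by iucv_process_message_q() from the receive path.
 */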
static void iucv_callback_txdone(struct iucv_path *path,
                 struct iucv_message *msg)
{
    struct sock *sk = path->private;
    struct sk_buff *this = NULL;
    struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
    struct sk_buff *list_skb = list->next;
    unsigned long flags;

    bh_lock_sock(sk);
    if (!skb_queue_empty(list)) {
        spin_lock_irqsave(&list->lock, flags);

        while (list_skb != (struct sk_buff *)list) {
            if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                this = list_skb;
                break;
            }
            list_skb = list_skb->next;
        }
        if (this)
            __skb_unlink(this, list);

        spin_unlock_irqrestore(&list->lock, flags);

        if (this) {
            kfree_skb(this);
            /* wake up any process waiting for sending */
            iucv_sock_wake_msglim(sk);
        }
    }

    if (sk->sk_state == IUCV_CLOSING) {
        if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
            sk->sk_state = IUCV_CLOSED;
            sk->sk_state_change(sk);
        }
    }
    bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
    struct sock *sk = path->private;

    if (sk->sk_state == IUCV_CLOSED)
        return;

    bh_lock_sock(sk);
    iucv_sever_path(sk, 1);
    sk->sk_state = IUCV_DISCONN;

    sk->sk_state_change(sk);
    bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
    struct sock *sk = path->private;

    bh_lock_sock(sk);
    if (sk->sk_state != IUCV_CLOSED) {
        sk->sk_shutdown |= SEND_SHUTDOWN;
        sk->sk_state_change(sk);
    }
    bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
    struct af_iucv_trans_hdr *trans_hdr =
                (struct af_iucv_trans_hdr *)skb->data;
    char tmpID[8];
    char tmpName[8];

    ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
    ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
    ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
    ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
    memcpy(tmpID, trans_hdr->srcUserID, 8);
    memcpy(tmpName, trans_hdr->srcAppName, 8);
    memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
    memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
    memcpy(trans_hdr->destUserID, tmpID, 8);
    memcpy(trans_hdr->destAppName, tmpName, 8);
    skb_push(skb, ETH_HLEN);
    memset(skb->data, 0, ETH_HLEN);
}
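
/*
 * Reply control frames (SYN|ACK, SYN|FIN, ...) are built by reusing the
 * received skb: swapping the source and destination fields in place
 * turns the incoming header into the header of the response.
 */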
/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
    struct sock *nsk;
    struct iucv_sock *iucv, *niucv;
    struct af_iucv_trans_hdr *trans_hdr;
    int err;

    iucv = iucv_sk(sk);
    trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
    if (!iucv) {
        /* no sock - connection refused */
        afiucv_swap_src_dest(skb);
        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
        err = dev_queue_xmit(skb);
        goto out;
    }

    nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
    bh_lock_sock(sk);
    if ((sk->sk_state != IUCV_LISTEN) ||
        sk_acceptq_is_full(sk) ||
        !nsk) {
        /* error on server socket - connection refused */
        afiucv_swap_src_dest(skb);
        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
        err = dev_queue_xmit(skb);
        iucv_sock_kill(nsk);
        bh_unlock_sock(sk);
        goto out;
    }

    niucv = iucv_sk(nsk);
    iucv_sock_init(nsk, sk);
    niucv->transport = AF_IUCV_TRANS_HIPER;
    niucv->msglimit = iucv->msglimit;
    if (!trans_hdr->window)
        niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
    else
        niucv->msglimit_peer = trans_hdr->window;
    memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
    memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
    memcpy(niucv->src_name, iucv->src_name, 8);
    memcpy(niucv->src_user_id, iucv->src_user_id, 8);
    nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
    niucv->hs_dev = iucv->hs_dev;
    dev_hold(niucv->hs_dev);
    afiucv_swap_src_dest(skb);
    trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
    trans_hdr->window = niucv->msglimit;
    /* if receiver acks the xmit connection is established */
    err = dev_queue_xmit(skb);
    if (!err) {
        iucv_accept_enqueue(sk, nsk);
        nsk->sk_state = IUCV_CONNECTED;
        sk->sk_data_ready(sk);
    } else
        iucv_sock_kill(nsk);
    bh_unlock_sock(sk);

out:
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
    struct iucv_sock *iucv = iucv_sk(sk);
    struct af_iucv_trans_hdr *trans_hdr =
                    (struct af_iucv_trans_hdr *)skb->data;

    if (!iucv)
        goto out;
    if (sk->sk_state != IUCV_BOUND)
        goto out;
    bh_lock_sock(sk);
    iucv->msglimit_peer = trans_hdr->window;
    sk->sk_state = IUCV_CONNECTED;
    sk->sk_state_change(sk);
    bh_unlock_sock(sk);
out:
    kfree_skb(skb);
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
    struct iucv_sock *iucv = iucv_sk(sk);

    if (!iucv)
        goto out;
    if (sk->sk_state != IUCV_BOUND)
        goto out;
    bh_lock_sock(sk);
    sk->sk_state = IUCV_DISCONN;
    sk->sk_state_change(sk);
    bh_unlock_sock(sk);
out:
    kfree_skb(skb);
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
    struct iucv_sock *iucv = iucv_sk(sk);

    /* other end of connection closed */
    if (!iucv)
        goto out;
    bh_lock_sock(sk);
    if (sk->sk_state == IUCV_CONNECTED) {
        sk->sk_state = IUCV_DISCONN;
        sk->sk_state_change(sk);
    }
    bh_unlock_sock(sk);
out:
    kfree_skb(skb);
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
    struct iucv_sock *iucv = iucv_sk(sk);
    struct af_iucv_trans_hdr *trans_hdr =
                    (struct af_iucv_trans_hdr *)skb->data;

    if (!iucv)
        return NET_RX_SUCCESS;

    if (sk->sk_state != IUCV_CONNECTED)
        return NET_RX_SUCCESS;

    atomic_sub(trans_hdr->window, &iucv->msg_sent);
    iucv_sock_wake_msglim(sk);
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
    struct iucv_sock *iucv = iucv_sk(sk);

    if (!iucv) {
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }

    if (sk->sk_state != IUCV_CONNECTED) {
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }

    if (sk->sk_shutdown & RCV_SHUTDOWN) {
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }

    /* write stuff from iucv_msg to skb cb */
    skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
    skb_reset_transport_header(skb);
    skb_reset_network_header(skb);
    IUCV_SKB_CB(skb)->offset = 0;
    if (sk_filter(sk, skb)) {
        atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }

    spin_lock(&iucv->message_q.lock);
    if (skb_queue_empty(&iucv->backlog_skb_q)) {
        if (__sock_queue_rcv_skb(sk, skb))
            /* handle rcv queue full */
            skb_queue_tail(&iucv->backlog_skb_q, skb);
    } else
        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
    spin_unlock(&iucv->message_q.lock);
    return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *                   transport
 *                   called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
             struct packet_type *pt, struct net_device *orig_dev)
{
    struct sock *sk;
    struct iucv_sock *iucv;
    struct af_iucv_trans_hdr *trans_hdr;
    int err = NET_RX_SUCCESS;
    char nullstring[8];

    if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
        WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
              (int)skb->len,
              (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
        kfree_skb(skb);
        return NET_RX_SUCCESS;
    }
    if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
        if (skb_linearize(skb)) {
            WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
                  (int)skb->len);
            kfree_skb(skb);
            return NET_RX_SUCCESS;
        }
    skb_pull(skb, ETH_HLEN);
    trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
    EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
    EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
    EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
    EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
    memset(nullstring, 0, sizeof(nullstring));
    iucv = NULL;
    sk = NULL;
    read_lock(&iucv_sk_list.lock);
    sk_for_each(sk, &iucv_sk_list.head) {
        if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
            if ((!memcmp(&iucv_sk(sk)->src_name,
                     trans_hdr->destAppName, 8)) &&
                (!memcmp(&iucv_sk(sk)->src_user_id,
                     trans_hdr->destUserID, 8)) &&
                (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
                (!memcmp(&iucv_sk(sk)->dst_user_id,
                     nullstring, 8))) {
                iucv = iucv_sk(sk);
                break;
            }
        } else {
            if ((!memcmp(&iucv_sk(sk)->src_name,
                     trans_hdr->destAppName, 8)) &&
                (!memcmp(&iucv_sk(sk)->src_user_id,
                     trans_hdr->destUserID, 8)) &&
                (!memcmp(&iucv_sk(sk)->dst_name,
                     trans_hdr->srcAppName, 8)) &&
                (!memcmp(&iucv_sk(sk)->dst_user_id,
                     trans_hdr->srcUserID, 8))) {
                iucv = iucv_sk(sk);
                break;
            }
        }
    }
    read_unlock(&iucv_sk_list.lock);
    if (!iucv)
        sk = NULL;

    /* no sock
    how should we send with no sock
    1) send without sock no send rc checking?
    2) introduce default sock to handle these cases

     SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
     data -> send FIN
     SYN|ACK, SYN|FIN, FIN -> no action? */

    switch (trans_hdr->flags) {
    case AF_IUCV_FLAG_SYN:
        /* connect request */
        err = afiucv_hs_callback_syn(sk, skb);
        break;
    case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
        /* connect request confirmed */
        err = afiucv_hs_callback_synack(sk, skb);
        break;
    case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
        /* connect request refused */
        err = afiucv_hs_callback_synfin(sk, skb);
        break;
    case (AF_IUCV_FLAG_FIN):
        /* close request */
        err = afiucv_hs_callback_fin(sk, skb);
        break;
    case (AF_IUCV_FLAG_WIN):
        err = afiucv_hs_callback_win(sk, skb);
        if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
            kfree_skb(skb);
            break;
        }
        /* fall through and receive non-zero length data */
    case (AF_IUCV_FLAG_SHT):
        /* shutdown request */
        /* fall through and receive zero length data */
    default:
        /* plain data frame */
        IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
        err = afiucv_hs_callback_rx(sk, skb);
        break;
    }

    return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from
 *                                 HiperSockets transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                    enum iucv_tx_notify n)
{
    struct sock *isk = skb->sk;
    struct sock *sk = NULL;
    struct iucv_sock *iucv = NULL;
    struct sk_buff_head *list;
    struct sk_buff *list_skb;
    struct sk_buff *nskb;
    unsigned long flags;

    read_lock_irqsave(&iucv_sk_list.lock, flags);
    sk_for_each(sk, &iucv_sk_list.head)
        if (sk == isk) {
            iucv = iucv_sk(sk);
            break;
        }
    read_unlock_irqrestore(&iucv_sk_list.lock, flags);

    if (!iucv || sock_flag(sk, SOCK_ZAPPED))
        return;

    list = &iucv->send_skb_q;
    spin_lock_irqsave(&list->lock, flags);
    if (skb_queue_empty(list))
        goto out_unlock;
    list_skb = list->next;
    nskb = list_skb->next;
    while (list_skb != (struct sk_buff *)list) {
        if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
            switch (n) {
            case TX_NOTIFY_OK:
                __skb_unlink(list_skb, list);
                kfree_skb(list_skb);
                iucv_sock_wake_msglim(sk);
                break;
            case TX_NOTIFY_PENDING:
                atomic_inc(&iucv->pendings);
                break;
            case TX_NOTIFY_DELAYED_OK:
                __skb_unlink(list_skb, list);
                atomic_dec(&iucv->pendings);
                if (atomic_read(&iucv->pendings) <= 0)
                    iucv_sock_wake_msglim(sk);
                kfree_skb(list_skb);
                break;
            case TX_NOTIFY_UNREACHABLE:
            case TX_NOTIFY_DELAYED_UNREACHABLE:
            case TX_NOTIFY_TPQFULL: /* not yet used */
            case TX_NOTIFY_GENERALERROR:
            case TX_NOTIFY_DELAYED_GENERALERROR:
                __skb_unlink(list_skb, list);
                kfree_skb(list_skb);
                if (sk->sk_state == IUCV_CONNECTED) {
                    sk->sk_state = IUCV_DISCONN;
                    sk->sk_state_change(sk);
                }
                break;
            }
            break;
        }
        list_skb = nskb;
        nskb = nskb->next;
    }
out_unlock:
    spin_unlock_irqrestore(&list->lock, flags);

    if (sk->sk_state == IUCV_CLOSING) {
        if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
            sk->sk_state = IUCV_CLOSED;
            sk->sk_state_change(sk);
        }
    }
}
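
/*
 * The queued clone and the skb handed back by the device share their
 * shared info block, so comparing skb_shinfo() pointers above
 * identifies the send_skb_q entry that belongs to this TX notification.
 */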
/**
 * afiucv_netdev_event: handle netdev notifier chain events
 **/
static int afiucv_netdev_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
    struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
    struct sock *sk;
    struct iucv_sock *iucv;

    switch (event) {
    case NETDEV_REBOOT:
    case NETDEV_GOING_DOWN:
        sk_for_each(sk, &iucv_sk_list.head) {
            iucv = iucv_sk(sk);
            if ((iucv->hs_dev == event_dev) &&
                (sk->sk_state == IUCV_CONNECTED)) {
                if (event == NETDEV_GOING_DOWN)
                    iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
                sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
            }
        }
        break;
    case NETDEV_DOWN:
    case NETDEV_UNREGISTER:
    default:
        break;
    }
    return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
    .notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
    .family		= PF_IUCV,
    .owner		= THIS_MODULE,
    .release	= iucv_sock_release,
    .bind		= iucv_sock_bind,
    .connect	= iucv_sock_connect,
    .listen		= iucv_sock_listen,
    .accept		= iucv_sock_accept,
    .getname	= iucv_sock_getname,
    .sendmsg	= iucv_sock_sendmsg,
    .recvmsg	= iucv_sock_recvmsg,
    .poll		= iucv_sock_poll,
    .ioctl		= sock_no_ioctl,
    .mmap		= sock_no_mmap,
    .socketpair	= sock_no_socketpair,
    .shutdown	= iucv_sock_shutdown,
    .setsockopt	= iucv_sock_setsockopt,
    .getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
    .family	= AF_IUCV,
    .owner	= THIS_MODULE,
    .create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
    .type = cpu_to_be16(ETH_P_AF_IUCV),
    .func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
    int err;

    err = pr_iucv->iucv_register(&af_iucv_handler, 0);
    if (err)
        goto out;
    /* establish dummy device */
    af_iucv_driver.bus = pr_iucv->bus;
    err = driver_register(&af_iucv_driver);
    if (err)
        goto out_iucv;
    af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
    if (!af_iucv_dev) {
        err = -ENOMEM;
        goto out_driver;
    }
    dev_set_name(af_iucv_dev, "af_iucv");
    af_iucv_dev->bus = pr_iucv->bus;
    af_iucv_dev->parent = pr_iucv->root;
    af_iucv_dev->release = (void (*)(struct device *))kfree;
    af_iucv_dev->driver = &af_iucv_driver;
    err = device_register(af_iucv_dev);
    if (err)
        goto out_driver;
    return 0;

out_driver:
    driver_unregister(&af_iucv_driver);
out_iucv:
    pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
    return err;
}
static int __init afiucv_init(void)
{
    int err;

    if (MACHINE_IS_VM) {
        cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
        if (unlikely(err)) {
            WARN_ON(err);
            err = -EPROTONOSUPPORT;
            goto out;
        }

        pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
        if (!pr_iucv) {
            printk(KERN_WARNING "iucv_if lookup failed\n");
            memset(&iucv_userid, 0, sizeof(iucv_userid));
        }
    } else {
        memset(&iucv_userid, 0, sizeof(iucv_userid));
        pr_iucv = NULL;
    }

    err = proto_register(&iucv_proto, 0);
    if (err)
        goto out;
    err = sock_register(&iucv_sock_family_ops);
    if (err)
        goto out_proto;

    if (pr_iucv) {
        err = afiucv_iucv_init();
        if (err)
            goto out_sock;
    }
    register_netdevice_notifier(&afiucv_netdev_notifier);
    dev_add_pack(&iucv_packet_type);
    return 0;

out_sock:
    sock_unregister(PF_IUCV);
out_proto:
    proto_unregister(&iucv_proto);
out:
    if (pr_iucv)
        symbol_put(iucv_if);
    return err;
}
static void __exit afiucv_exit(void)
{
    if (pr_iucv) {
        device_unregister(af_iucv_dev);
        driver_unregister(&af_iucv_driver);
        pr_iucv->iucv_unregister(&af_iucv_handler, 0);
        symbol_put(iucv_if);
    }
    unregister_netdevice_notifier(&afiucv_netdev_notifier);
    dev_remove_pack(&iucv_packet_type);
    sock_unregister(PF_IUCV);
    proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);