/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>
#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;
static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
static struct iucv_interface *pr_iucv;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)
#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	__iucv_sock_wait(sk, condition, timeo, __ret);			\
	__ret;								\
})
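/*
 * Usage sketch for the wait helpers above (illustrative only; the real
 * call sites appear later in this file, e.g. iucv_sock_connect()):
 *
 *	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 *	err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
 *						    IUCV_DISCONN), timeo);
 *
 * The macro evaluates to 0 on success or a negative errno: the value of
 * sock_intr_errno() when a signal interrupted the wait, or a pending
 * sock_error() observed after waking up.
 */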
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}
static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}
static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};
/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;
	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = skb_push(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
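/*
 * Resulting on-wire layout built above (sketch; field sizes are those of
 * struct af_iucv_trans_hdr):
 *
 *	+------------+--------------------------------+---------------+
 *	| ETH header | af_iucv_trans_hdr              | message data  |
 *	| (zeroed)   | magic/version/flags/window,    | (optional)    |
 *	|            | EBCDIC src/dst IDs, iucv_hdr   |               |
 *	+------------+--------------------------------+---------------+
 *
 * Control frames (SYN/ACK/FIN/WIN/SHT) consist of the two headers only;
 * a clone of each frame stays on send_skb_q until the TX notification
 * arrives (see afiucv_hs_callback_txnotify()).
 */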
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
				    gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	atomic_set(&iucv->pendings, 0);
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[9];

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
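/*
 * Typical caller-side setup for the bind above (illustrative sketch of a
 * hypothetical user-space snippet, not part of this module):
 *
 *	struct sockaddr_iucv sa = { .siucv_family = AF_IUCV };
 *	memcpy(sa.siucv_user_id, "LNXGUEST", 8);  // blank-padded z/VM user id
 *	memcpy(sa.siucv_name,    "APPSRV  ", 8);  // application name
 *	bind(fd, (struct sockaddr *) &sa, sizeof(sa));
 *
 * If siucv_user_id matches the local VM user id, the socket is bound to
 * the classic IUCV transport; if it matches a HiperSockets device's
 * perm_addr, the HIPER transport is chosen; otherwise bind fails with
 * -ENODEV.
 */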
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
924 static int iucv_sock_listen(struct socket
*sock
, int backlog
)
926 struct sock
*sk
= sock
->sk
;
932 if (sk
->sk_state
!= IUCV_BOUND
)
935 if (sock
->type
!= SOCK_STREAM
&& sock
->type
!= SOCK_SEQPACKET
)
938 sk
->sk_max_ack_backlog
= backlog
;
939 sk
->sk_ack_backlog
= 0;
940 sk
->sk_state
= IUCV_LISTEN
;
948 /* Accept a pending connection */
949 static int iucv_sock_accept(struct socket
*sock
, struct socket
*newsock
,
950 int flags
, bool kern
)
952 DECLARE_WAITQUEUE(wait
, current
);
953 struct sock
*sk
= sock
->sk
, *nsk
;
957 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
959 if (sk
->sk_state
!= IUCV_LISTEN
) {
964 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
966 /* Wait for an incoming connection */
967 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
968 while (!(nsk
= iucv_accept_dequeue(sk
, newsock
))) {
969 set_current_state(TASK_INTERRUPTIBLE
);
976 timeo
= schedule_timeout(timeo
);
977 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
979 if (sk
->sk_state
!= IUCV_LISTEN
) {
984 if (signal_pending(current
)) {
985 err
= sock_intr_errno(timeo
);
990 set_current_state(TASK_RUNNING
);
991 remove_wait_queue(sk_sleep(sk
), &wait
);
996 newsock
->state
= SS_CONNECTED
;
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *) prmdata, 8);
}
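/*
 * Example (illustrative): sending the 3-byte record "abc" as an IPRM
 * message stores { 'a', 'b', 'c', 0, 0, 0, 0, 0xfc } in the parameter
 * list, since 0xff - 3 = 0xfc; the receiver's iucv_msg_length() then
 * recovers a length of 3 without any separate data buffer transfer.
 */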
static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}
	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
		linear = len;
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;
	/* wait if the limit of outstanding messages for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;
1177 IUCV_SKB_CB(skb
)->tag
= txmsg
.tag
;
1179 if (iucv
->transport
== AF_IUCV_TRANS_HIPER
) {
1180 atomic_inc(&iucv
->msg_sent
);
1181 err
= afiucv_hs_send(&txmsg
, sk
, skb
, 0);
1183 atomic_dec(&iucv
->msg_sent
);
1186 } else { /* Classic VM IUCV transport */
1187 skb_queue_tail(&iucv
->send_skb_q
, skb
);
1189 if (((iucv
->path
->flags
& IUCV_IPRMDATA
) & iucv
->flags
) &&
1191 err
= iucv_send_iprm(iucv
->path
, &txmsg
, skb
);
1193 /* on success: there is no message_complete callback */
1194 /* for an IPRMDATA msg; remove skb from send queue */
1196 skb_unlink(skb
, &iucv
->send_skb_q
);
1200 /* this error should never happen since the */
1201 /* IUCV_IPRMDATA path flag is set... sever path */
1203 pr_iucv
->path_sever(iucv
->path
, NULL
);
1204 skb_unlink(skb
, &iucv
->send_skb_q
);
1208 } else if (skb_is_nonlinear(skb
)) {
1209 struct iucv_array
*iba
= (struct iucv_array
*)skb
->head
;
1212 /* skip iucv_array lying in the headroom */
1213 iba
[0].address
= (u32
)(addr_t
)skb
->data
;
1214 iba
[0].length
= (u32
)skb_headlen(skb
);
1215 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1216 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1218 iba
[i
+ 1].address
=
1219 (u32
)(addr_t
)skb_frag_address(frag
);
1220 iba
[i
+ 1].length
= (u32
)skb_frag_size(frag
);
1222 err
= pr_iucv
->message_send(iucv
->path
, &txmsg
,
1224 (void *)iba
, skb
->len
);
1225 } else { /* non-IPRM Linear skb */
1226 err
= pr_iucv
->message_send(iucv
->path
, &txmsg
,
1227 0, 0, (void *)skb
->data
, skb
->len
);
1232 memcpy(user_id
, iucv
->dst_user_id
, 8);
1234 memcpy(appl_id
, iucv
->dst_name
, 8);
1236 "Application %s on z/VM guest %s exceeds message limit\n",
1242 skb_unlink(skb
, &iucv
->send_skb_q
);
static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}
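	/*
	 * Receiver-side counterpart (hypothetical user-space sketch): the
	 * target class arrives as ancillary data and can be read back with
	 *
	 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
	 *		if (c->cmsg_level == SOL_IUCV &&
	 *		    c->cmsg_type == SCM_IUCV_TRGCLS)
	 *			trgcls = *(u32 *) CMSG_DATA(c);
	 *
	 * after a successful recvmsg() call.
	 */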
	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err)
				err = -ENOTCONN;
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
1726 static int iucv_callback_connreq(struct iucv_path
*path
,
1727 u8 ipvmid
[8], u8 ipuser
[16])
1729 unsigned char user_data
[16];
1730 unsigned char nuser_data
[16];
1731 unsigned char src_name
[8];
1732 struct sock
*sk
, *nsk
;
1733 struct iucv_sock
*iucv
, *niucv
;
1736 memcpy(src_name
, ipuser
, 8);
1737 EBCASC(src_name
, 8);
1738 /* Find out if this path belongs to af_iucv. */
1739 read_lock(&iucv_sk_list
.lock
);
1742 sk_for_each(sk
, &iucv_sk_list
.head
)
1743 if (sk
->sk_state
== IUCV_LISTEN
&&
1744 !memcmp(&iucv_sk(sk
)->src_name
, src_name
, 8)) {
1746 * Found a listening socket with
1747 * src_name == ipuser[0-7].
1752 read_unlock(&iucv_sk_list
.lock
);
1754 /* No socket found, not one of our paths. */
1759 /* Check if parent socket is listening */
1760 low_nmcpy(user_data
, iucv
->src_name
);
1761 high_nmcpy(user_data
, iucv
->dst_name
);
1762 ASCEBC(user_data
, sizeof(user_data
));
1763 if (sk
->sk_state
!= IUCV_LISTEN
) {
1764 err
= pr_iucv
->path_sever(path
, user_data
);
1765 iucv_path_free(path
);
1769 /* Check for backlog size */
1770 if (sk_acceptq_is_full(sk
)) {
1771 err
= pr_iucv
->path_sever(path
, user_data
);
1772 iucv_path_free(path
);
1776 /* Create the new socket */
1777 nsk
= iucv_sock_alloc(NULL
, sk
->sk_type
, GFP_ATOMIC
, 0);
1779 err
= pr_iucv
->path_sever(path
, user_data
);
1780 iucv_path_free(path
);
1784 niucv
= iucv_sk(nsk
);
1785 iucv_sock_init(nsk
, sk
);
1787 /* Set the new iucv_sock */
1788 memcpy(niucv
->dst_name
, ipuser
+ 8, 8);
1789 EBCASC(niucv
->dst_name
, 8);
1790 memcpy(niucv
->dst_user_id
, ipvmid
, 8);
1791 memcpy(niucv
->src_name
, iucv
->src_name
, 8);
1792 memcpy(niucv
->src_user_id
, iucv
->src_user_id
, 8);
1795 /* Call iucv_accept */
1796 high_nmcpy(nuser_data
, ipuser
+ 8);
1797 memcpy(nuser_data
+ 8, niucv
->src_name
, 8);
1798 ASCEBC(nuser_data
+ 8, 8);
1800 /* set message limit for path based on msglimit of accepting socket */
1801 niucv
->msglimit
= iucv
->msglimit
;
1802 path
->msglim
= iucv
->msglimit
;
1803 err
= pr_iucv
->path_accept(path
, &af_iucv_handler
, nuser_data
, nsk
);
1805 iucv_sever_path(nsk
, 1);
1806 iucv_sock_kill(nsk
);
1810 iucv_accept_enqueue(sk
, nsk
);
1812 /* Wake up accept */
1813 nsk
->sk_state
= IUCV_CONNECTED
;
1814 sk
->sk_data_ready(sk
);
1821 static void iucv_callback_connack(struct iucv_path
*path
, u8 ipuser
[16])
1823 struct sock
*sk
= path
->private;
1825 sk
->sk_state
= IUCV_CONNECTED
;
1826 sk
->sk_state_change(sk
);
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}
/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 **/
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	iucv->msglimit_peer = trans_hdr->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 **/
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		goto out;
	if (sk->sk_state != IUCV_BOUND)
		goto out;
	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_fin() - react on received FIN
 **/
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv)
		goto out;
	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
out:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_win() - react on received WIN
 **/
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct af_iucv_trans_hdr *trans_hdr =
					(struct af_iucv_trans_hdr *)skb->data;

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(trans_hdr->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_callback_rx() - react on received data
 **/
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
/**
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 **/
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
		WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
			  (int)skb->len,
			  (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}
	if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
		if (skb_linearize(skb)) {
			WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
				  (int)skb->len);
			kfree_skb(skb);
			return NET_RX_SUCCESS;
		}
	skb_pull(skb, ETH_HLEN);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	   how should we send with no sock
	   1) send without sock, no send rc checking?
	   2) introduce a default sock to handle these cases

	    SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	    data -> send FIN
	    SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			kfree_skb(skb);
			break;
		}
		/* fall through and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		/* fall through and receive zero length data */
	default:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	}

	return err;
}
/**
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 **/
static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
					enum iucv_tx_notify n)
{
	struct sock *isk = skb->sk;
	struct sock *sk = NULL;
	struct iucv_sock *iucv = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct sk_buff *nskb;
	unsigned long flags;

	read_lock_irqsave(&iucv_sk_list.lock, flags);
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk == isk) {
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock_irqrestore(&iucv_sk_list.lock, flags);

	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
		return;

	list = &iucv->send_skb_q;
	spin_lock_irqsave(&list->lock, flags);
	if (skb_queue_empty(list))
		goto out_unlock;
	list_skb = list->next;
	nskb = list_skb->next;
	while (list_skb != (struct sk_buff *)list) {
		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
			switch (n) {
			case TX_NOTIFY_OK:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				iucv_sock_wake_msglim(sk);
				break;
			case TX_NOTIFY_PENDING:
				atomic_inc(&iucv->pendings);
				break;
			case TX_NOTIFY_DELAYED_OK:
				__skb_unlink(list_skb, list);
				atomic_dec(&iucv->pendings);
				if (atomic_read(&iucv->pendings) <= 0)
					iucv_sock_wake_msglim(sk);
				kfree_skb(list_skb);
				break;
			case TX_NOTIFY_UNREACHABLE:
			case TX_NOTIFY_DELAYED_UNREACHABLE:
			case TX_NOTIFY_TPQFULL: /* not yet used */
			case TX_NOTIFY_GENERALERROR:
			case TX_NOTIFY_DELAYED_GENERALERROR:
				__skb_unlink(list_skb, list);
				kfree_skb(list_skb);
				if (sk->sk_state == IUCV_CONNECTED) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
				break;
			}
			break;
		}
		list_skb = nskb;
		nskb = nskb->next;
	}
out_unlock:
	spin_unlock_irqrestore(&list->lock, flags);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err) {
		put_device(af_iucv_dev);
		goto out_driver;
	}
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void afiucv_iucv_exit(void)
{
	device_unregister(af_iucv_dev);
	driver_unregister(&af_iucv_driver);
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
}
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		afiucv_iucv_exit();
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		afiucv_iucv_exit();
		symbol_put(iucv_if);
	}

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);