git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blobdiff - net/iucv/af_iucv.c
af_iucv: change net_device handling for HS transport
[mirror_ubuntu-eoan-kernel.git] / net / iucv / af_iucv.c
index d5c5b8fd1d01e50f2c9a928809184b28e77ea4bd..fbce4a3126de59ed4fc99e8a3b34083f02d92ffb 100644 (file)
@@ -90,6 +90,7 @@ do {                                                                  \
 
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);
+static void iucv_sever_path(struct sock *, int);
 
 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct packet_type *pt, struct net_device *orig_dev);
@@ -130,17 +131,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
        memcpy(&dst[8], src, 8);
 }
 
-static void iucv_skb_queue_purge(struct sk_buff_head *list)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(list)) != NULL) {
-               if (skb->dev)
-                       dev_put(skb->dev);
-               kfree_skb(skb);
-       }
-}
-
 static int afiucv_pm_prepare(struct device *dev)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -175,17 +165,13 @@ static int afiucv_pm_freeze(struct device *dev)
        read_lock(&iucv_sk_list.lock);
        sk_for_each(sk, node, &iucv_sk_list.head) {
                iucv = iucv_sk(sk);
-               iucv_skb_queue_purge(&iucv->send_skb_q);
+               skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
                switch (sk->sk_state) {
                case IUCV_DISCONN:
                case IUCV_CLOSING:
                case IUCV_CONNECTED:
-                       if (iucv->path) {
-                               err = pr_iucv->path_sever(iucv->path, NULL);
-                               iucv_path_free(iucv->path);
-                               iucv->path = NULL;
-                       }
+                       iucv_sever_path(sk, 0);
                        break;
                case IUCV_OPEN:
                case IUCV_BOUND:
@@ -194,6 +180,8 @@ static int afiucv_pm_freeze(struct device *dev)
                default:
                        break;
                }
+               skb_queue_purge(&iucv->send_skb_q);
+               skb_queue_purge(&iucv->backlog_skb_q);
        }
        read_unlock(&iucv_sk_list.lock);
        return err;
@@ -338,7 +326,6 @@ static void iucv_sock_wake_msglim(struct sock *sk)
 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                   struct sk_buff *skb, u8 flags)
 {
-       struct net *net = sock_net(sock);
        struct iucv_sock *iucv = iucv_sk(sock);
        struct af_iucv_trans_hdr *phs_hdr;
        struct sk_buff *nskb;
@@ -375,10 +362,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
        if (imsg)
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
-       skb->dev = dev_get_by_index(net, sock->sk_bound_dev_if);
+       skb->dev = iucv->hs_dev;
        if (!skb->dev)
                return -ENODEV;
-       if (!(skb->dev->flags & IFF_UP))
+       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
                return -ENETDOWN;
        if (skb->len > skb->dev->mtu) {
                if (sock->sk_type == SOCK_SEQPACKET)
@@ -393,15 +380,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                return -ENOMEM;
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
-       if (err) {
+       if (net_xmit_eval(err)) {
                skb_unlink(nskb, &iucv->send_skb_q);
-               dev_put(nskb->dev);
                kfree_skb(nskb);
        } else {
                atomic_sub(confirm_recv, &iucv->msg_recv);
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
-       return err;
+       return net_xmit_eval(err);
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -447,13 +433,33 @@ static void iucv_sock_kill(struct sock *sk)
        sock_put(sk);
 }
 
+/* Terminate an IUCV path */
+static void iucv_sever_path(struct sock *sk, int with_user_data)
+{
+       unsigned char user_data[16];
+       struct iucv_sock *iucv = iucv_sk(sk);
+       struct iucv_path *path = iucv->path;
+
+       if (iucv->path) {
+               iucv->path = NULL;
+               if (with_user_data) {
+                       low_nmcpy(user_data, iucv->src_name);
+                       high_nmcpy(user_data, iucv->dst_name);
+                       ASCEBC(user_data, sizeof(user_data));
+                       pr_iucv->path_sever(path, user_data);
+               } else
+                       pr_iucv->path_sever(path, NULL);
+               iucv_path_free(path);
+       }
+}
+
 /* Close an IUCV socket */
 static void iucv_sock_close(struct sock *sk)
 {
-       unsigned char user_data[16];
        struct iucv_sock *iucv = iucv_sk(sk);
        unsigned long timeo;
-       int err, blen;
+       int err = 0;
+       int blen;
        struct sk_buff *skb;
 
        lock_sock(sk);
@@ -480,7 +486,7 @@ static void iucv_sock_close(struct sock *sk)
                sk->sk_state = IUCV_CLOSING;
                sk->sk_state_change(sk);
 
-               if (!skb_queue_empty(&iucv->send_skb_q)) {
+               if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
                        if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
                                timeo = sk->sk_lingertime;
                        else
@@ -494,25 +500,20 @@ static void iucv_sock_close(struct sock *sk)
                sk->sk_state = IUCV_CLOSED;
                sk->sk_state_change(sk);
 
-               if (iucv->path) {
-                       low_nmcpy(user_data, iucv->src_name);
-                       high_nmcpy(user_data, iucv->dst_name);
-                       ASCEBC(user_data, sizeof(user_data));
-                       pr_iucv->path_sever(iucv->path, user_data);
-                       iucv_path_free(iucv->path);
-                       iucv->path = NULL;
-               }
-
                sk->sk_err = ECONNRESET;
                sk->sk_state_change(sk);
 
-               iucv_skb_queue_purge(&iucv->send_skb_q);
+               skb_queue_purge(&iucv->send_skb_q);
                skb_queue_purge(&iucv->backlog_skb_q);
-               break;
 
-       default:
-               /* nothing to do here */
-               break;
+       default:   /* fall through */
+               iucv_sever_path(sk, 1);
+       }
+
+       if (iucv->hs_dev) {
+               dev_put(iucv->hs_dev);
+               iucv->hs_dev = NULL;
+               sk->sk_bound_dev_if = 0;
        }
 
        /* mark socket for deletion by iucv_sock_kill() */
@@ -706,7 +707,6 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                goto done_unlock;
 
        /* Bind the socket */
-
        if (pr_iucv)
                if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
                        goto vm_bind; /* VM IUCV transport */
@@ -720,6 +720,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
                        memcpy(iucv->src_name, sa->siucv_name, 8);
                        memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
                        sk->sk_bound_dev_if = dev->ifindex;
+                       iucv->hs_dev = dev;
+                       dev_hold(dev);
                        sk->sk_state = IUCV_BOUND;
                        iucv->transport = AF_IUCV_TRANS_HIPER;
                        if (!iucv->msglimit)
@@ -894,11 +896,8 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
        if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
                err = -ECONNREFUSED;
 
-       if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
-               pr_iucv->path_sever(iucv->path, NULL);
-               iucv_path_free(iucv->path);
-               iucv->path = NULL;
-       }
+       if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
+               iucv_sever_path(sk, 0);
 
 done:
        release_sock(sk);
@@ -1124,8 +1123,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        noblock, &err);
        else
                skb = sock_alloc_send_skb(sk, len, noblock, &err);
-       if (!skb)
+       if (!skb) {
+               err = -ENOMEM;
                goto out;
+       }
        if (iucv->transport == AF_IUCV_TRANS_HIPER)
                skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1148,6 +1149,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        /* increment and save iucv message tag for msg_completion cbk */
        txmsg.tag = iucv->send_tag++;
        memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+
        if (iucv->transport == AF_IUCV_TRANS_HIPER) {
                atomic_inc(&iucv->msg_sent);
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
@@ -1202,8 +1204,6 @@ release:
        return len;
 
 fail:
-       if (skb->dev)
-               dev_put(skb->dev);
        kfree_skb(skb);
 out:
        release_sock(sk);
@@ -1396,7 +1396,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                }
 
                kfree_skb(skb);
-               atomic_inc(&iucv->msg_recv);
+               if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+                       atomic_inc(&iucv->msg_recv);
+                       if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
+                               WARN_ON(1);
+                               iucv_sock_close(sk);
+                               return -EFAULT;
+                       }
+               }
 
                /* Queue backlog skbs */
                spin_lock_bh(&iucv->message_q.lock);
@@ -1486,7 +1493,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == IUCV_DISCONN)
                mask |= POLLIN;
 
-       if (sock_writeable(sk))
+       if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
        else
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
@@ -1565,13 +1572,6 @@ static int iucv_sock_release(struct socket *sock)
 
        iucv_sock_close(sk);
 
-       /* Unregister with IUCV base support */
-       if (iucv_sk(sk)->path) {
-               pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
-               iucv_path_free(iucv_sk(sk)->path);
-               iucv_sk(sk)->path = NULL;
-       }
-
        sock_orphan(sk);
        iucv_sock_kill(sk);
        return err;
@@ -1750,8 +1750,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
        path->msglim = iucv->msglimit;
        err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
        if (err) {
-               err = pr_iucv->path_sever(path, user_data);
-               iucv_path_free(path);
+               iucv_sever_path(nsk, 1);
                iucv_sock_kill(nsk);
                goto fail;
        }
@@ -1828,6 +1827,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
        struct sk_buff *list_skb = list->next;
        unsigned long flags;
 
+       bh_lock_sock(sk);
        if (!skb_queue_empty(list)) {
                spin_lock_irqsave(&list->lock, flags);
 
@@ -1849,7 +1849,6 @@ static void iucv_callback_txdone(struct iucv_path *path,
                        iucv_sock_wake_msglim(sk);
                }
        }
-       BUG_ON(!this);
 
        if (sk->sk_state == IUCV_CLOSING) {
                if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
@@ -1857,6 +1856,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                        sk->sk_state_change(sk);
                }
        }
+       bh_unlock_sock(sk);
 
 }
 
@@ -1864,9 +1864,15 @@ static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
 {
        struct sock *sk = path->private;
 
+       if (sk->sk_state == IUCV_CLOSED)
+               return;
+
+       bh_lock_sock(sk);
+       iucv_sever_path(sk, 1);
        sk->sk_state = IUCV_DISCONN;
 
        sk->sk_state_change(sk);
+       bh_unlock_sock(sk);
 }
 
 /* called if the other communication side shuts down its RECV direction;
@@ -1954,6 +1960,8 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
        memcpy(niucv->src_name, iucv->src_name, 8);
        memcpy(niucv->src_user_id, iucv->src_user_id, 8);
        nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+       niucv->hs_dev = iucv->hs_dev;
+       dev_hold(niucv->hs_dev);
        afiucv_swap_src_dest(skb);
        trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
        trans_hdr->window = niucv->msglimit;
@@ -2022,12 +2030,15 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
        struct iucv_sock *iucv = iucv_sk(sk);
 
        /* other end of connection closed */
-       if (iucv) {
-               bh_lock_sock(sk);
+       if (!iucv)
+               goto out;
+       bh_lock_sock(sk);
+       if (sk->sk_state == IUCV_CONNECTED) {
                sk->sk_state = IUCV_DISCONN;
                sk->sk_state_change(sk);
-               bh_unlock_sock(sk);
        }
+       bh_unlock_sock(sk);
+out:
        kfree_skb(skb);
        return NET_RX_SUCCESS;
 }
@@ -2172,11 +2183,11 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                break;
        case (AF_IUCV_FLAG_WIN):
                err = afiucv_hs_callback_win(sk, skb);
-               if (skb->len > sizeof(struct af_iucv_trans_hdr))
-                       err = afiucv_hs_callback_rx(sk, skb);
-               else
-                       kfree(skb);
-               break;
+               if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
+                       kfree_skb(skb);
+                       break;
+               }
+               /* fall through */
        case 0:
                /* plain data frame */
                memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class,
@@ -2202,65 +2213,64 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
        struct iucv_sock *iucv = NULL;
        struct sk_buff_head *list;
        struct sk_buff *list_skb;
-       struct sk_buff *this = NULL;
+       struct sk_buff *nskb;
        unsigned long flags;
        struct hlist_node *node;
 
-       read_lock(&iucv_sk_list.lock);
+       read_lock_irqsave(&iucv_sk_list.lock, flags);
        sk_for_each(sk, node, &iucv_sk_list.head)
                if (sk == isk) {
                        iucv = iucv_sk(sk);
                        break;
                }
-       read_unlock(&iucv_sk_list.lock);
+       read_unlock_irqrestore(&iucv_sk_list.lock, flags);
 
-       if (!iucv)
+       if (!iucv || sock_flag(sk, SOCK_ZAPPED))
                return;
 
-       bh_lock_sock(sk);
        list = &iucv->send_skb_q;
-       list_skb = list->next;
+       spin_lock_irqsave(&list->lock, flags);
        if (skb_queue_empty(list))
                goto out_unlock;
-
-       spin_lock_irqsave(&list->lock, flags);
+       list_skb = list->next;
+       nskb = list_skb->next;
        while (list_skb != (struct sk_buff *)list) {
                if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
-                       this = list_skb;
                        switch (n) {
                        case TX_NOTIFY_OK:
-                               __skb_unlink(this, list);
+                               __skb_unlink(list_skb, list);
+                               kfree_skb(list_skb);
                                iucv_sock_wake_msglim(sk);
-                               dev_put(this->dev);
-                               kfree_skb(this);
                                break;
                        case TX_NOTIFY_PENDING:
                                atomic_inc(&iucv->pendings);
                                break;
                        case TX_NOTIFY_DELAYED_OK:
-                               __skb_unlink(this, list);
+                               __skb_unlink(list_skb, list);
                                atomic_dec(&iucv->pendings);
                                if (atomic_read(&iucv->pendings) <= 0)
                                        iucv_sock_wake_msglim(sk);
-                               dev_put(this->dev);
-                               kfree_skb(this);
+                               kfree_skb(list_skb);
                                break;
                        case TX_NOTIFY_UNREACHABLE:
                        case TX_NOTIFY_DELAYED_UNREACHABLE:
                        case TX_NOTIFY_TPQFULL: /* not yet used */
                        case TX_NOTIFY_GENERALERROR:
                        case TX_NOTIFY_DELAYED_GENERALERROR:
-                               __skb_unlink(this, list);
-                               dev_put(this->dev);
-                               kfree_skb(this);
-                               sk->sk_state = IUCV_DISCONN;
-                               sk->sk_state_change(sk);
+                               __skb_unlink(list_skb, list);
+                               kfree_skb(list_skb);
+                               if (sk->sk_state == IUCV_CONNECTED) {
+                                       sk->sk_state = IUCV_DISCONN;
+                                       sk->sk_state_change(sk);
+                               }
                                break;
                        }
                        break;
                }
-               list_skb = list_skb->next;
+               list_skb = nskb;
+               nskb = nskb->next;
        }
+out_unlock:
        spin_unlock_irqrestore(&list->lock, flags);
 
        if (sk->sk_state == IUCV_CLOSING) {
@@ -2270,8 +2280,6 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
                }
        }
 
-out_unlock:
-       bh_unlock_sock(sk);
 }
 static const struct proto_ops iucv_sock_ops = {
        .family         = PF_IUCV,