git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
staging: lustre: lnet: cleanup some of the > 80 line issues
author    James Simmons <jsimmons@infradead.org>
          Thu, 17 Nov 2016 19:35:57 +0000 (14:35 -0500)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 18 Nov 2016 07:47:33 +0000 (08:47 +0100)
Clean up some of the over-80-character line issues reported by checkpatch.

Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
drivers/staging/lustre/lnet/selftest/conrpc.c
drivers/staging/lustre/lnet/selftest/conrpc.h
drivers/staging/lustre/lnet/selftest/console.c
drivers/staging/lustre/lnet/selftest/framework.c
drivers/staging/lustre/lnet/selftest/rpc.c
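
For context, the pattern applied throughout the hunks below is the usual checkpatch.pl fix for "line over 80 characters": wrap argument lists at the opening parenthesis with the continuation aligned under the first argument, move long trailing comments into a comment line above the statement, and split long initializers into a declaration plus a separate assignment. A minimal, compilable sketch of that style, using hypothetical names rather than code from this commit:

/*
 * Before (over 80 columns on one line):
 *
 * static int example_send(struct example_conn *conn, unsigned int feats, int bulk_npg, int bulk_len)
 */

struct example_conn {
        int ec_nob;             /* bytes queued on this connection */
        int ec_flags;
};

/* After: return type on its own line, arguments wrapped and aligned */
static int
example_send(struct example_conn *conn, unsigned int feats,
             int bulk_npg, int bulk_len)
{
        /* a long trailing comment becomes its own line above the code */
        if (conn->ec_flags)
                return -1;

        return conn->ec_nob + bulk_npg + bulk_len + (int)feats;
}

Aligning continuation arguments under the first argument (rather than using a fixed indent) matches the existing style in these files.
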

diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cbc9a9c5385f4819cadb9dc93f663a993fa3c318..b74cf635faee42fc925284903736ea12e150e1dd 100644
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
 }
 
 static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
+                    lnet_process_id_t id)
 {
        int cpt = lnet_cpt_of_nid(id.nid);
        struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
 }
 
 static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+                                    struct ksock_conn *conn)
 {
        struct ksock_peer *peer = route->ksnr_peer;
        int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
                                if (k < peer->ksnp_n_passive_ips) /* using it already */
                                        continue;
 
-                               k = ksocknal_match_peerip(iface, peerips, n_peerips);
+                               k = ksocknal_match_peerip(iface, peerips,
+                                                         n_peerips);
                                xor = ip ^ peerips[k];
                                this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
 
@@ -1302,8 +1305,11 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
 
        /* Take packets blocking for this connection. */
        list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
-               if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
-                               continue;
+               int match = conn->ksnc_proto->pro_match_tx(conn, tx,
+                                                          tx->tx_nonblk);
+
+               if (match == SOCKNAL_MATCH_NO)
+                       continue;
 
                list_del(&tx->tx_list);
                ksocknal_queue_tx_locked(tx, conn);
@@ -1493,8 +1499,8 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
                        spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                }
 
-               peer->ksnp_proto = NULL;        /* renegotiate protocol version */
-               peer->ksnp_error = error;       /* stash last conn close reason */
+               peer->ksnp_proto = NULL;  /* renegotiate protocol version */
+               peer->ksnp_error = error; /* stash last conn close reason */
 
                if (list_empty(&peer->ksnp_routes)) {
                        /*
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
                                continue;
 
-                       count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+                       count += ksocknal_close_peer_conns_locked(peer, ipaddr,
+                                                                 0);
                }
        }
 
@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                }
 
                rc = 0;
-               /* NB only new connections will pay attention to the new interface! */
+               /*
+                * NB only new connections will pay attention to the
+                * new interface!
+                */
        }
 
        write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                int txmem;
                int rxmem;
                int nagle;
-               struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+               struct ksock_conn *conn;
 
+               conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
                if (!conn)
                        return -ENOENT;
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index f31f4a19705b546b2b5aa72fc691b3b234e5ed36..972f6094be75d7b39ed4cc0d55b278563dd4306d 100644
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
 }
 
 struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
+                         int nonblk)
 {
        struct list_head *tmp;
        struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
        int fnob = 0;
 
        list_for_each(tmp, &peer->ksnp_conns) {
-               struct ksock_conn *c  = list_entry(tmp, struct ksock_conn, ksnc_list);
-               int nob = atomic_read(&c->ksnc_tx_nob) +
-                       c->ksnc_sock->sk->sk_wmem_queued;
-               int rc;
+               struct ksock_conn *c;
+               int nob, rc;
+
+               c = list_entry(tmp, struct ksock_conn, ksnc_list);
+               nob = atomic_read(&c->ksnc_tx_nob) +
+                     c->ksnc_sock->sk->sk_wmem_queued;
 
                LASSERT(!c->ksnc_closing);
                LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
                LASSERT(msg->ksm_zc_cookies[1]);
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
+               /* ZC ACK piggybacked on ztx release tx later */
                if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
-                       ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
-
+                       ztx = tx;
        } else {
                /*
                 * It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
 
                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
-               if (route->ksnr_scheduled)      /* connections being established */
+               /* connections being established */
+               if (route->ksnr_scheduled)
                        continue;
 
                /* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
                        rc = ksocknal_process_transmit(conn, tx);
 
                        if (rc == -ENOMEM || rc == -EAGAIN) {
-                               /* Incomplete send: replace tx on HEAD of tx_queue */
+                               /*
+                                * Incomplete send: replace tx on HEAD of
+                                * tx_queue
+                                */
                                spin_lock_bh(&sched->kss_lock);
                                list_add(&tx->tx_list, &conn->ksnc_tx_queue);
                        } else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
        timeout = active ? *ksocknal_tunables.ksnd_timeout :
                            lnet_acceptor_timeout();
 
-       rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+       rc = lnet_sock_read(sock, &hello->kshm_magic,
+                           sizeof(hello->kshm_magic), timeout);
        if (rc) {
                CERROR("Error %d reading HELLO from %pI4h\n",
                       rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
            conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
                /* Userspace NAL assigns peer process ID from socket */
                recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
-               recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+               recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+                                        conn->ksnc_ipaddr);
        } else {
                recv_id.nid = hello->kshm_src_nid;
                recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
                if (peer->ksnp_accepting > 0) {
                        CDEBUG(D_NET,
                               "peer %s(%d) already connecting to me, retry later.\n",
-                              libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+                              libcfs_nid2str(peer->ksnp_id.nid),
+                              peer->ksnp_accepting);
                        retry_later = 1;
                }
 
@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)
 
                /* Nothing to do for 'timeout'  */
                set_current_state(TASK_INTERRUPTIBLE);
-               add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+               add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+                                        &wait);
                spin_unlock_bh(connd_lock);
 
                nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
        struct ksock_conn *conn;
        struct ksock_tx *tx;
 
-       if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+       /* last_alive will be updated by create_conn */
+       if (list_empty(&peer->ksnp_conns))
                return 0;
 
        if (peer->ksnp_proto != &ksocknal_protocol_v3x)
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index 6c95e989ca129087fc5adce305830784db57c8f7..4bcab4bcc2de1327a5a491711326a19f881ed774 100644
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
                                fragnob = sum;
 
                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
-                                                          iov[i].iov_base, fragnob);
+                                                          iov[i].iov_base,
+                                                          fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
 }
 
 int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
+                              int *rxmem, int *nagle)
 {
        struct socket *sock = conn->ksnc_sock;
        int len;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 82e174f6d9fef26d97343238753eaec94b51bb96..8f0ff6ca1f396ca66128c5a803a5fefa3a3587b7 100644
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
        }
 
        if (!tx->tx_msg.ksm_zc_cookies[0]) {
-               /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+               /*
+                * NOOP tx has only one ZC-ACK cookie,
+                * can carry at least one more
+                */
                if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
                        tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
                        tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
                }
 
                if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
-                       /* not likely to carry more ACKs, skip it to simplify logic */
+                       /*
+                        * not likely to carry more ACKs, skip it
+                        * to simplify logic
+                        */
                        ksocknal_next_tx_carrier(conn);
                }
 
@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
                }
 
        } else {
-               /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+               /*
+                * ksm_zc_cookies[0] < ksm_zc_cookies[1],
+                * it is range of cookies
+                */
                if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
                    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
                        CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
                                 tx_zc_list) {
                __u64 c = tx->tx_msg.ksm_zc_cookies[0];
 
-               if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+               if (c == cookie1 || c == cookie2 ||
+                   (cookie1 < c && c < cookie2)) {
                        tx->tx_msg.ksm_zc_cookies[0] = 0;
                        list_del(&tx->tx_zc_list);
                        list_add(&tx->tx_zc_list, &zlist);
@@ -639,7 +649,8 @@ out:
 }
 
 static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+                      int timeout)
 {
        struct socket *sock = conn->ksnc_sock;
        int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
                tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
                tx->tx_resid = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
        }
-       /* Don't checksum before start sending, because packet can be piggybacked with ACK */
+       /*
+        * Don't checksum before start sending, because packet can be
+        * piggybacked with ACK
+        */
 }
 
 static void
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 3d071b4ebf9a9dc9f13961b4fe7559a96cc365e0..994422c62487665e43f9ae8fe58511e4c8b0a4f5 100644
@@ -87,7 +87,8 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)
 
 static int
 lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
-               int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
+               int bulk_npg, int bulk_len, int embedded,
+               struct lstcon_rpc *crpc)
 {
        crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
                                       feats, bulk_npg, bulk_len,
@@ -292,8 +293,8 @@ lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
 
                spin_lock(&rpc->crpc_lock);
 
-               if (!crpc->crp_posted ||        /* not posted */
-                   crpc->crp_stamp) {          /* rpc done or aborted already */
+               if (!crpc->crp_posted || /* not posted */
+                   crpc->crp_stamp) {   /* rpc done or aborted already */
                        if (!crpc->crp_stamp) {
                                crpc->crp_stamp = cfs_time_current();
                                crpc->crp_status = -EINTR;
@@ -778,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
 }
 
 static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
+                      struct srpc_test_reqst *req)
 {
        struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
 
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 27ba6394487dc0ace6df097b3732713ab55da945..e629e87c461cb7ac3185b9644dfd16095d8b9d9d 100644
@@ -131,7 +131,8 @@ int  lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
                                  lstcon_rpc_readent_func_t readent);
 void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
 void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+                            struct lstcon_rpc *req);
 int  lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
 int  lstcon_rpc_pinger_start(void);
 void lstcon_rpc_pinger_stop(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index faa532015a98889a786d508b943a6f29f70e6454..f2d11fac5bd895cc0b5ad252252374c485420ea3 100644
@@ -86,7 +86,7 @@ lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
        if (!create)
                return -ENOENT;
 
-       LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+       LIBCFS_ALLOC(*ndpp, sizeof(*ndpp) + sizeof(*ndl));
        if (!*ndpp)
                return -ENOMEM;
 
@@ -131,12 +131,12 @@ lstcon_node_put(struct lstcon_node *nd)
        list_del(&ndl->ndl_link);
        list_del(&ndl->ndl_hlink);
 
-       LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+       LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
 }
 
 static int
-lstcon_ndlink_find(struct list_head *hash,
-                  lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
+lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+                  struct lstcon_ndlink **ndlpp, int create)
 {
        unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
        struct lstcon_ndlink *ndl;
@@ -230,7 +230,8 @@ lstcon_group_addref(struct lstcon_group *grp)
        grp->grp_ref++;
 }
 
-static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+                                       struct lstcon_ndlink *);
 
 static void
 lstcon_group_drain(struct lstcon_group *grp, int keep)
@@ -1183,7 +1184,8 @@ lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
 }
 
 static int
-lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+                     struct list_head __user *result_up)
 {
        struct lstcon_rpc_trans *trans;
        struct lstcon_group *grp;
@@ -1366,7 +1368,8 @@ out:
 }
 
 static int
-lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx,
+                struct lstcon_test **testpp)
 {
        struct lstcon_test *test;
 
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 8f9ee3fcdbad3ec8d495b816f5ea8637e83356a4..48dcc330dc9b21e9e732c076e99392852ba87847 100644
@@ -131,7 +131,8 @@ sfw_find_test_case(int id)
 }
 
 static int
-sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
+sfw_register_test(struct srpc_service *service,
+                 struct sfw_test_client_ops *cliops)
 {
        struct sfw_test_case *tsc;
 
@@ -469,7 +470,8 @@ sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
 }
 
 static int
-sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+                  struct srpc_rmsn_reply *reply)
 {
        struct sfw_session *sn = sfw_data.fw_session;
 
@@ -501,7 +503,8 @@ sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *repl
 }
 
 static int
-sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+                 struct srpc_debug_reply *reply)
 {
        struct sfw_session *sn = sfw_data.fw_session;
 
@@ -1064,7 +1067,8 @@ sfw_stop_batch(struct sfw_batch *tsb, int force)
 }
 
 static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx,
+               struct srpc_batch_reply *reply)
 {
        struct sfw_test_instance *tsi;
 
@@ -1179,7 +1183,8 @@ sfw_add_test(struct srpc_server_rpc *rpc)
 }
 
 static int
-sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
+sfw_control_batch(struct srpc_batch_reqst *request,
+                 struct srpc_batch_reply *reply)
 {
        struct sfw_session *sn = sfw_data.fw_session;
        int rc = 0;
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 0498c56bf26edbbb8d19e2094eaedf0372826c30..ce9de8c9be57fa93c47820d4dc68869a10a9ccc3 100644
@@ -696,7 +696,8 @@ srpc_finish_service(struct srpc_service *sv)
 
 /* called with sv->sv_lock held */
 static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+                           struct srpc_buffer *buf)
 __must_hold(&scd->scd_lock)
 {
        if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {