rxrpc: Support network namespacing
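The hunks below replace the global connection list, proc list, lock and reaper with per-namespace state reached via conn->params.local->rxnet. As a minimal sketch (not the commit's full definition, which lives in net/rxrpc/ar-internal.h and also carries call, peer and client-connection state), the fields this file now relies on look roughly like this:

/* Sketch of the per-netns state assumed by this diff; only the members
 * referenced by the hunks below are shown, everything else is omitted.
 */
struct rxrpc_net {
	struct list_head	conn_proc_list;		/* was rxrpc_connection_proc_list */
	struct list_head	service_conns;		/* was rxrpc_connections */
	rwlock_t		conn_lock;		/* was rxrpc_connection_lock */
	struct delayed_work	service_conn_reaper;	/* was rxrpc_connection_reap */
	struct delayed_work	client_conn_reaper;	/* client-side counterpart */
};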
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index b0ecb770fdcebaaa1dd5258e5d9e2d7c937e6edf..ade4d3d0b2a7e24e6d3fd94acfb393de7a49822f 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
  */
 unsigned int rxrpc_connection_expiry = 10 * 60;
 
-static void rxrpc_connection_reaper(struct work_struct *work);
-
-LIST_HEAD(rxrpc_connections);
-LIST_HEAD(rxrpc_connection_proc_list);
-DEFINE_RWLOCK(rxrpc_connection_lock);
-static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
-
 static void rxrpc_destroy_connection(struct rcu_head *);
 
 /*
@@ -222,15 +215,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
  */
 void rxrpc_kill_connection(struct rxrpc_connection *conn)
 {
+       struct rxrpc_net *rxnet = conn->params.local->rxnet;
+
        ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
               !rcu_access_pointer(conn->channels[1].call) &&
               !rcu_access_pointer(conn->channels[2].call) &&
               !rcu_access_pointer(conn->channels[3].call));
        ASSERT(list_empty(&conn->cache_link));
 
-       write_lock(&rxrpc_connection_lock);
+       write_lock(&rxnet->conn_lock);
        list_del_init(&conn->proc_link);
-       write_unlock(&rxrpc_connection_lock);
+       write_unlock(&rxnet->conn_lock);
 
        /* Drain the Rx queue.  Note that even though we've unpublished, an
         * incoming packet could still be being added to our Rx queue, so we
@@ -309,14 +304,17 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
  */
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
+       struct rxrpc_net *rxnet;
        const void *here = __builtin_return_address(0);
        int n;
 
        n = atomic_dec_return(&conn->usage);
        trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
        ASSERTCMP(n, >=, 0);
-       if (n == 0)
-               rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+       if (n == 0) {
+               rxnet = conn->params.local->rxnet;
+               rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
+       }
 }
 
 /*
@@ -348,9 +346,12 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
 /*
  * reap dead service connections
  */
-static void rxrpc_connection_reaper(struct work_struct *work)
+void rxrpc_service_connection_reaper(struct work_struct *work)
 {
        struct rxrpc_connection *conn, *_p;
+       struct rxrpc_net *rxnet =
+               container_of(to_delayed_work(work),
+                            struct rxrpc_net, service_conn_reaper);
        unsigned long reap_older_than, earliest, idle_timestamp, now;
 
        LIST_HEAD(graveyard);
@@ -361,8 +362,8 @@ static void rxrpc_connection_reaper(struct work_struct *work)
        reap_older_than = now - rxrpc_connection_expiry * HZ;
        earliest = ULONG_MAX;
 
-       write_lock(&rxrpc_connection_lock);
-       list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+       write_lock(&rxnet->conn_lock);
+       list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
                ASSERTCMP(atomic_read(&conn->usage), >, 0);
                if (likely(atomic_read(&conn->usage) > 1))
                        continue;
@@ -393,12 +394,12 @@ static void rxrpc_connection_reaper(struct work_struct *work)
 
                list_move_tail(&conn->link, &graveyard);
        }
-       write_unlock(&rxrpc_connection_lock);
+       write_unlock(&rxnet->conn_lock);
 
        if (earliest != ULONG_MAX) {
                _debug("reschedule reaper %ld", (long) earliest - now);
                ASSERT(time_after(earliest, now));
-               rxrpc_queue_delayed_work(&rxrpc_connection_reap,
+               rxrpc_queue_delayed_work(&rxnet->service_conn_reaper,
                                         earliest - now);
        }
 
@@ -418,36 +419,30 @@ static void rxrpc_connection_reaper(struct work_struct *work)
  * preemptively destroy all the service connection records rather than
  * waiting for them to time out
  */
-void __exit rxrpc_destroy_all_connections(void)
+void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
 {
        struct rxrpc_connection *conn, *_p;
        bool leak = false;
 
        _enter("");
 
-       rxrpc_destroy_all_client_connections();
+       rxrpc_destroy_all_client_connections(rxnet);
 
        rxrpc_connection_expiry = 0;
-       cancel_delayed_work(&rxrpc_connection_reap);
-       rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
+       cancel_delayed_work(&rxnet->service_conn_reaper);
+       rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
        flush_workqueue(rxrpc_workqueue);
 
-       write_lock(&rxrpc_connection_lock);
-       list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
+       write_lock(&rxnet->conn_lock);
+       list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
                pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
                       conn, atomic_read(&conn->usage));
                leak = true;
        }
-       write_unlock(&rxrpc_connection_lock);
+       write_unlock(&rxnet->conn_lock);
        BUG_ON(leak);
 
-       ASSERT(list_empty(&rxrpc_connection_proc_list));
-
-       /* Make sure the local and peer records pinned by any dying connections
-        * are released.
-        */
-       rcu_barrier();
-       rxrpc_destroy_client_conn_ids();
+       ASSERT(list_empty(&rxnet->conn_proc_list));
 
        _leave("");
 }
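With rxrpc_destroy_all_connections() now taking a struct rxrpc_net * and no longer marked __exit, it is meant to be driven from the per-namespace exit hook rather than from module unload. A hedged sketch of how the pernet wiring could look; the names rxrpc_init_net, rxrpc_exit_net and rxrpc_net_ops are illustrative here, and the commit's actual hooks live in net/rxrpc/net_ns.c:

unsigned int rxrpc_net_id;

static __net_init int rxrpc_init_net(struct net *net)
{
	struct rxrpc_net *rxnet = net_generic(net, rxrpc_net_id);

	/* Initialise the per-namespace connection state used by conn_object.c */
	INIT_LIST_HEAD(&rxnet->conn_proc_list);
	INIT_LIST_HEAD(&rxnet->service_conns);
	rwlock_init(&rxnet->conn_lock);
	INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
			  rxrpc_service_connection_reaper);
	return 0;
}

static __net_exit void rxrpc_exit_net(struct net *net)
{
	struct rxrpc_net *rxnet = net_generic(net, rxrpc_net_id);

	/* Tear down this namespace's connections instead of waiting for
	 * them to expire or for the module to be unloaded.
	 */
	rxrpc_destroy_all_connections(rxnet);
}

static struct pernet_operations rxrpc_net_ops = {
	.init	= rxrpc_init_net,
	.exit	= rxrpc_exit_net,
	.id	= &rxrpc_net_id,
	.size	= sizeof(struct rxrpc_net),
};

/* Registered once at module init, e.g.:
 *	err = register_pernet_subsys(&rxrpc_net_ops);
 */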