rapidio: run discovery as an asynchronous process
author     Alexandre Bounine <alexandre.bounine@idt.com>
           Fri, 5 Oct 2012 00:16:08 +0000 (17:16 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 5 Oct 2012 18:05:22 +0000 (03:05 +0900)
Modify the mport initialization routine to run the RapidIO discovery process
asynchronously.  This allows an arbitrary order of enumerating and
discovering ports in systems with multiple RapidIO controllers, without
creating a deadlock when an enumerating port is registered after a
discovering one.
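
For reference, the pre-patch loop in rio_init_mports() (visible in the
rio.c hunk below) ran discovery inline, so registration order decided
whether the loop could make progress:

	list_for_each_entry(port, &rio_mports, node) {
		if (port->host_deviceid >= 0)
			rio_enum_mport(port);	/* this mport enumerates */
		else
			rio_disc_mport(port);	/* waits on a remote enumerator */
	}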

Matching the net ID to the mport ID ensures consistent net ID assignment in
multiport RapidIO systems with an asynchronous discovery process (the
previous global-counter implementation is subject to a race between
discovery threads).
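
To make the race concrete, here is an annotated before/after sketch (the
comments are illustrative, not part of the patch):

	static int next_net = 0;

	/* Racy: next_net++ is a non-atomic read-modify-write, so two
	 * discovery workers may both read 0 and hand out net ID 0. */
	net->id = next_net++;

	/* Race-free: derived from the worker's own mport, no shared state. */
	net->id = port->id;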

[akpm@linux-foundation.org: tweak code layout]
Signed-off-by: Alexandre Bounine <alexandre.bounine@idt.com>
Cc: Matt Porter <mporter@kernel.crashing.org>
Cc: Li Yang <leoli@freescale.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/rapidio/rio-scan.c
drivers/rapidio/rio.c

diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 8b7c4bce7a4ab8410a43b9debc76076d6cf1004a..745670f535e815870c0e1db5ba1570bb120ec83f 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -44,7 +44,6 @@ static void rio_init_em(struct rio_dev *rdev);
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_net = 0;
 static int next_comptag = 1;
 
 static int rio_mport_phys_table[] = {
@@ -1062,7 +1061,7 @@ static struct rio_net __devinit *rio_alloc_net(struct rio_mport *port)
                INIT_LIST_HEAD(&net->mports);
                list_add_tail(&port->nnode, &net->mports);
                net->hport = port;
-               net->id = next_net++;
+               net->id = port->id;
        }
        return net;
 }
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index d7b68cc2d8433b2e08693258b63cbca37a17da98..d4bd69013c501010fc5ba76ce58c949d46708a9e 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1260,15 +1260,62 @@ static int __devinit rio_init(void)
        return 0;
 }
 
+static struct workqueue_struct *rio_wq;
+
+struct rio_disc_work {
+       struct work_struct      work;
+       struct rio_mport        *mport;
+};
+
+static void __devinit disc_work_handler(struct work_struct *_work)
+{
+       struct rio_disc_work *work;
+
+       work = container_of(_work, struct rio_disc_work, work);
+       pr_debug("RIO: discovery work for mport %d %s\n",
+                work->mport->id, work->mport->name);
+       rio_disc_mport(work->mport);
+
+       kfree(work);
+}
+
 int __devinit rio_init_mports(void)
 {
        struct rio_mport *port;
+       struct rio_disc_work *work;
+       int no_disc = 0;
 
        list_for_each_entry(port, &rio_mports, node) {
                if (port->host_deviceid >= 0)
                        rio_enum_mport(port);
-               else
-                       rio_disc_mport(port);
+               else if (!no_disc) {
+                       if (!rio_wq) {
+                               rio_wq = alloc_workqueue("riodisc", 0, 0);
+                               if (!rio_wq) {
+                                       pr_err("RIO: unable to allocate rio_wq\n");
+                                       no_disc = 1;
+                                       continue;
+                               }
+                       }
+
+                       work = kzalloc(sizeof *work, GFP_KERNEL);
+                       if (!work) {
+                               pr_err("RIO: no memory for work struct\n");
+                               no_disc = 1;
+                               continue;
+                       }
+
+                       work->mport = port;
+                       INIT_WORK(&work->work, disc_work_handler);
+                       queue_work(rio_wq, &work->work);
+               }
+       }
+
+       if (rio_wq) {
+               pr_debug("RIO: flush discovery workqueue\n");
+               flush_workqueue(rio_wq);
+               pr_debug("RIO: flush discovery workqueue finished\n");
+               destroy_workqueue(rio_wq);
        }
 
        rio_init();
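
The deferral pattern used above (a work_struct embedded by value in a
per-item structure, recovered with container_of() in the handler, one
allocation per queued item) is the stock kernel workqueue idiom.  A
minimal sketch with hypothetical names (my_work, my_handler and
my_queue_one are illustrations, not part of this patch):

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	struct my_work {
		struct work_struct work;	/* must be embedded by value */
		int payload;			/* per-item context */
	};

	static void my_handler(struct work_struct *_work)
	{
		/* Recover the containing item from the embedded member. */
		struct my_work *w = container_of(_work, struct my_work, work);

		pr_debug("handling payload %d\n", w->payload);
		kfree(w);			/* the handler owns and frees the item */
	}

	static int my_queue_one(struct workqueue_struct *wq, int payload)
	{
		struct my_work *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return -ENOMEM;
		w->payload = payload;
		INIT_WORK(&w->work, my_handler);
		queue_work(wq, &w->work);
		return 0;
	}

As in the patch, calling flush_workqueue() before destroy_workqueue()
guarantees that every queued handler has run (and freed its item) before
the queue disappears.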