drivers/base: Introduce kill_device()
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 715145798d97f63bab42d70e5de1405b3626f3b6..ddfaf230292c68cff999bcdfc739ff7593cde6dd 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -217,6 +217,13 @@ struct device_link *device_link_add(struct device *consumer,
                        link->rpm_active = true;
                }
                pm_runtime_new_link(consumer);
+               /*
+                * If the link is being added by the consumer driver at probe
+                * time, balance the decrementation of the supplier's runtime PM
+                * usage counter after consumer probe in driver_probe_device().
+                */
+               if (consumer->links.status == DL_DEV_PROBING)
+                       pm_runtime_get_noresume(supplier);
        }
        get_device(supplier);
        link->supplier = supplier;
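For context, a minimal consumer-side sketch of the pattern this hunk is tuned for: a driver creating a managed link to its supplier from its own probe routine with DL_FLAG_PM_RUNTIME. Everything named foo_* below is invented for illustration; only device_link_add(), the flag, and the runtime-PM behaviour described in the comments come from the patch.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/*
 * Hypothetical consumer probe.  Because consumer->links.status is
 * DL_DEV_PROBING at this point, device_link_add() now takes a
 * pm_runtime_get_noresume() reference on the supplier, and
 * driver_probe_device() drops it again once this probe returns.
 */
static int foo_consumer_probe(struct platform_device *pdev)
{
	struct device *supplier = pdev->dev.parent;	/* assumed supplier */
	struct device_link *link;

	link = device_link_add(&pdev->dev, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	return 0;
}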
@@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer,
                        switch (consumer->links.status) {
                        case DL_DEV_PROBING:
                                /*
-                                * Balance the decrementation of the supplier's
-                                * runtime PM usage counter after consumer probe
-                                * in driver_probe_device().
+                                * Some callers expect the link creation during
+                                * consumer driver probe to resume the supplier
+                                * even without DL_FLAG_RPM_ACTIVE.
                                 */
                                if (flags & DL_FLAG_PM_RUNTIME)
-                                       pm_runtime_get_sync(supplier);
+                                       pm_runtime_resume(supplier);
 
                                link->status = DL_STATE_CONSUMER_PROBE;
                                break;
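The comment rewrite above is worth spelling out: pm_runtime_get_sync() increments the device's usage counter and resumes it, while pm_runtime_resume() only resumes it. Since the counter is now bumped via pm_runtime_get_noresume() at link creation (first hunk), the probe-time path only needs the resume half. A sketch for illustration only, not code from the patch:

#include <linux/pm_runtime.h>

/* Illustration: how the probe-time supplier resume is now split up. */
static void resume_supplier_during_consumer_probe(struct device *supplier)
{
	/*
	 * The usage-counter increment already happened when the link was
	 * created (pm_runtime_get_noresume()), so only the resume itself
	 * is needed here; pm_runtime_get_sync() would have stacked a
	 * second reference on top of that.
	 */
	pm_runtime_resume(supplier);
}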
@@ -993,8 +1000,14 @@ out:
 static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       if (kobject_synth_uevent(&dev->kobj, buf, count))
+       int rc;
+
+       rc = kobject_synth_uevent(&dev->kobj, buf, count);
+
+       if (rc) {
                dev_err(dev, "uevent: failed to send synthetic uevent\n");
+               return rc;
+       }
 
        return count;
 }
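One user-visible effect of returning rc instead of count: writing a malformed synthetic-uevent string to a device's uevent attribute now fails at the write() call instead of appearing to succeed. A small userspace sketch; the sysfs path and the bogus action string are only examples:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Any device's uevent node will do; this path is just an example. */
	int fd = open("/sys/class/net/lo/uevent", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * "frobnicate" is not a valid uevent action, so
	 * kobject_synth_uevent() fails and, with this change, its error
	 * code is what write() reports to userspace.
	 */
	if (write(fd, "frobnicate", strlen("frobnicate")) < 0)
		perror("write");

	close(fd);
	return 0;
}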
@@ -1573,6 +1586,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
                return;
 
        mutex_lock(&gdp_mutex);
+       if (!kobject_has_children(glue_dir))
+               kobject_del(glue_dir);
        kobject_put(glue_dir);
        mutex_unlock(&gdp_mutex);
 }
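For reference, kobject_has_children() is a small inline helper in <linux/kobject.h>; the version below is quoted from memory for kernels of this vintage, so treat it as a sketch rather than the authoritative definition. It only checks for child sysfs directories, so the kobject_del() above is performed just for an empty glue directory while gdp_mutex is held.

/* Sketch of the helper used above (see <linux/kobject.h>). */
static inline bool kobject_has_children(struct kobject *kobj)
{
	WARN_ON_ONCE(kref_read(&kobj->kref) == 0);

	return kobj->sd && kobj->sd->dir.subdirs;
}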
@@ -1949,6 +1964,24 @@ void put_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(put_device);
 
+bool kill_device(struct device *dev)
+{
+       /*
+        * Require the device lock and set the "dead" flag to guarantee that
+        * the update behavior is consistent with the other bitfields near
+        * it and that we cannot have an asynchronous probe routine trying
+        * to run while we are tearing out the bus/class/sysfs from
+        * underneath the device.
+        */
+       lockdep_assert_held(&dev->mutex);
+
+       if (dev->p->dead)
+               return false;
+       dev->p->dead = true;
+       return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
 /**
  * device_del - delete device from system.
  * @dev: device.
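A hedged usage sketch for the new helper; example_remove_child() is invented for illustration, while kill_device(), device_lock()/device_unlock() and device_unregister() are real driver-core APIs. Because kill_device() returns true exactly once per device, racing teardown paths can use it to pick a single winner, and the later call from device_del() (next hunk) is then a harmless no-op.

#include <linux/device.h>

/* Hypothetical teardown path; not part of this patch. */
static void example_remove_child(struct device *dev)
{
	bool is_ours;

	device_lock(dev);
	is_ours = kill_device(dev);	/* true only for the first caller */
	device_unlock(dev);

	if (is_ours)
		device_unregister(dev);	/* the sole winner tears it down */
}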
@@ -1968,6 +2001,10 @@ void device_del(struct device *dev)
        struct kobject *glue_dir = NULL;
        struct class_interface *class_intf;
 
+       device_lock(dev);
+       kill_device(dev);
+       device_unlock(dev);
+
        /* Notify clients of device removal.  This call must come
         * before dpm_sysfs_remove().
         */
@@ -2785,6 +2822,9 @@ void device_shutdown(void)
 {
        struct device *dev, *parent;
 
+       wait_for_device_probe();
+       device_block_probing();
+
        spin_lock(&devices_kset->list_lock);
        /*
         * Walk the devices list backward, shutting down each in turn.
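The two calls added at the top of device_shutdown() quiesce probing before any ->shutdown() callback runs: wait_for_device_probe() (declared in <linux/device.h>) waits for probes already in flight, and device_block_probing() (a driver-core-internal helper from drivers/base/base.h) pushes any later probe attempt onto the deferred list. A conceptual sketch of that ordering; the function below is invented for illustration and would live inside the driver core:

/* Illustration only: drain in-flight probes, then block new ones. */
static void quiesce_probes_for_shutdown(void)
{
	wait_for_device_probe();	/* let running probes finish */
	device_block_probing();		/* defer anything that probes later */
	/* No device_unblock_probing() here: the system is going down. */
}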