staging: greybus: move the greybus core to drivers/greybus
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 25 Aug 2019 05:54:28 +0000 (07:54 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Tue, 27 Aug 2019 17:03:04 +0000 (19:03 +0200)
The Greybus core code has been stable for a long time, and has been
shipping for many years in millions of phones.  With the advent of a
recent Google Summer of Code project, and a number of new devices in the
works from various companies, it is time to get the core greybus code
out of staging as it really is going to be with us for a while.

Cc: Johan Hovold <johan@kernel.org>
Cc: linux-kernel@vger.kernel.org
Cc: greybus-dev@lists.linaro.org
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Alex Elder <elder@kernel.org>
Link: https://lore.kernel.org/r/20190825055429.18547-9-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
34 files changed:
MAINTAINERS
drivers/Kconfig
drivers/Makefile
drivers/greybus/Kconfig [new file with mode: 0644]
drivers/greybus/Makefile [new file with mode: 0644]
drivers/greybus/bundle.c [new file with mode: 0644]
drivers/greybus/connection.c [new file with mode: 0644]
drivers/greybus/control.c [new file with mode: 0644]
drivers/greybus/core.c [new file with mode: 0644]
drivers/greybus/debugfs.c [new file with mode: 0644]
drivers/greybus/greybus_trace.h [new file with mode: 0644]
drivers/greybus/hd.c [new file with mode: 0644]
drivers/greybus/interface.c [new file with mode: 0644]
drivers/greybus/manifest.c [new file with mode: 0644]
drivers/greybus/module.c [new file with mode: 0644]
drivers/greybus/operation.c [new file with mode: 0644]
drivers/greybus/svc.c [new file with mode: 0644]
drivers/greybus/svc_watchdog.c [new file with mode: 0644]
drivers/staging/greybus/Kconfig
drivers/staging/greybus/Makefile
drivers/staging/greybus/bundle.c [deleted file]
drivers/staging/greybus/connection.c [deleted file]
drivers/staging/greybus/control.c [deleted file]
drivers/staging/greybus/core.c [deleted file]
drivers/staging/greybus/debugfs.c [deleted file]
drivers/staging/greybus/es2.c
drivers/staging/greybus/greybus_trace.h [deleted file]
drivers/staging/greybus/hd.c [deleted file]
drivers/staging/greybus/interface.c [deleted file]
drivers/staging/greybus/manifest.c [deleted file]
drivers/staging/greybus/module.c [deleted file]
drivers/staging/greybus/operation.c [deleted file]
drivers/staging/greybus/svc.c [deleted file]
drivers/staging/greybus/svc_watchdog.c [deleted file]
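
With the core in drivers/greybus/, class drivers bind to bundles through the greybus_driver interface this code exports. For orientation, a minimal bundle driver against the now-in-core API might look like the following sketch; it is illustrative only and not part of this commit, and the gb_example_* names are hypothetical:

/* Hypothetical example -- not part of this commit. */
#include <linux/module.h>
#include <linux/greybus.h>

static int gb_example_probe(struct gb_bundle *bundle,
			    const struct greybus_bundle_id *id)
{
	dev_info(&bundle->dev, "example bundle %u bound\n", bundle->id);
	return 0;
}

static void gb_example_disconnect(struct gb_bundle *bundle)
{
	dev_info(&bundle->dev, "example bundle %u unbound\n", bundle->id);
}

static const struct greybus_bundle_id gb_example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VENDOR) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_example_id_table);

static struct greybus_driver gb_example_driver = {
	.name		= "gb_example",
	.probe		= gb_example_probe,
	.disconnect	= gb_example_disconnect,
	.id_table	= gb_example_id_table,
};
module_greybus_driver(gb_example_driver);

MODULE_LICENSE("GPL");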

index 0f38cba2c581ffb99d5f9f5f24a2cf00341bf4bf..e3242687cd192f4cd3e4a2d0458fc03712b870f7 100644 (file)
@@ -7003,6 +7003,9 @@ M:        Alex Elder <elder@kernel.org>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
 F:     drivers/staging/greybus/
+F:     drivers/greybus/
+F:     include/linux/greybus.h
+F:     include/linux/greybus/
 L:     greybus-dev@lists.linaro.org (moderated for non-subscribers)
 
 GREYBUS UART PROTOCOLS DRIVERS
index 61cf4ea2c229b3c03e2fcb5a890def6a15642d31..7dce76ae7369239fbe3d4061ecdbc951d97af986 100644 (file)
@@ -146,6 +146,8 @@ source "drivers/hv/Kconfig"
 
 source "drivers/xen/Kconfig"
 
+source "drivers/greybus/Kconfig"
+
 source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
index 6d37564e783cb2d27f86a5c38b00891dee611cee..73df8e5a2fce30ce8a09381e78bdedba8642aa91 100644 (file)
@@ -148,6 +148,7 @@ obj-$(CONFIG_BCMA)          += bcma/
 obj-$(CONFIG_VHOST_RING)       += vhost/
 obj-$(CONFIG_VHOST)            += vhost/
 obj-$(CONFIG_VLYNQ)            += vlynq/
+obj-$(CONFIG_GREYBUS)          += greybus/
 obj-$(CONFIG_STAGING)          += staging/
 obj-y                          += platform/
 
diff --git a/drivers/greybus/Kconfig b/drivers/greybus/Kconfig
new file mode 100644 (file)
index 0000000..158d889
--- /dev/null
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+menuconfig GREYBUS
+       tristate "Greybus support"
+       depends on SYSFS
+       ---help---
+         This option enables the Greybus driver core.  Greybus is a
+         hardware protocol that was designed to provide UniPro with a
+         sane application layer.  It was originally designed for the
+         ARA project, a modular phone system, but has shown up in other
+         phones, and can be tunneled over other busses in order to
+         control hardware devices.
+
+         Say Y here to enable support for these types of drivers.
+
+         To compile this code as a module, choose M here: the module
+         will be called greybus.ko.
diff --git a/drivers/greybus/Makefile b/drivers/greybus/Makefile
new file mode 100644 (file)
index 0000000..03b2261
--- /dev/null
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Greybus core
+greybus-y :=   core.o          \
+               debugfs.o       \
+               hd.o            \
+               manifest.o      \
+               module.o        \
+               interface.o     \
+               bundle.o        \
+               connection.o    \
+               control.o       \
+               svc.o           \
+               svc_watchdog.o  \
+               operation.o
+
+obj-$(CONFIG_GREYBUS)          += greybus.o
+
+# needed for trace events
+ccflags-y += -I$(src)
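
The -I$(src) line is needed because the kernel's trace-event machinery re-includes greybus_trace.h by file name when the tracepoints are instantiated, so the directory holding the header must be on the include path. A sketch of the standard pattern (in mainline greybus the instantiation lives in core.c):

/*
 * Illustrative sketch, not part of this commit.  Exactly one object
 * file linked into greybus.ko instantiates the trace events:
 */
#define CREATE_TRACE_POINTS
#include "greybus_trace.h"

/* ...and greybus_trace.h itself ends with the usual boilerplate: */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE greybus_trace
#include <trace/define_trace.h>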
diff --git a/drivers/greybus/bundle.c b/drivers/greybus/bundle.c
new file mode 100644 (file)
index 0000000..8466072
--- /dev/null
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus bundles
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/greybus.h>
+#include "greybus_trace.h"
+
+static ssize_t bundle_class_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       return sprintf(buf, "0x%02x\n", bundle->class);
+}
+static DEVICE_ATTR_RO(bundle_class);
+
+static ssize_t bundle_id_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       return sprintf(buf, "%u\n", bundle->id);
+}
+static DEVICE_ATTR_RO(bundle_id);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       if (!bundle->state)
+               return sprintf(buf, "\n");
+
+       return sprintf(buf, "%s\n", bundle->state);
+}
+
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       kfree(bundle->state);
+       bundle->state = kstrdup(buf, GFP_KERNEL);
+       if (!bundle->state)
+               return -ENOMEM;
+
+       /* Tell userspace that the file contents changed */
+       sysfs_notify(&bundle->dev.kobj, NULL, "state");
+
+       return size;
+}
+static DEVICE_ATTR_RW(state);
+
+static struct attribute *bundle_attrs[] = {
+       &dev_attr_bundle_class.attr,
+       &dev_attr_bundle_id.attr,
+       &dev_attr_state.attr,
+       NULL,
+};
+
+ATTRIBUTE_GROUPS(bundle);
+
+static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
+                                       u8 bundle_id)
+{
+       struct gb_bundle *bundle;
+
+       list_for_each_entry(bundle, &intf->bundles, links) {
+               if (bundle->id == bundle_id)
+                       return bundle;
+       }
+
+       return NULL;
+}
+
+static void gb_bundle_release(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+
+       trace_gb_bundle_release(bundle);
+
+       kfree(bundle->state);
+       kfree(bundle->cport_desc);
+       kfree(bundle);
+}
+
+#ifdef CONFIG_PM
+static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &bundle->connections, bundle_links)
+               gb_connection_disable(connection);
+}
+
+static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
+{
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &bundle->connections, bundle_links)
+               gb_connection_enable(connection);
+}
+
+static int gb_bundle_suspend(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct dev_pm_ops *pm = dev->driver->pm;
+       int ret;
+
+       if (pm && pm->runtime_suspend) {
+               ret = pm->runtime_suspend(&bundle->dev);
+               if (ret)
+                       return ret;
+       } else {
+               gb_bundle_disable_all_connections(bundle);
+       }
+
+       ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
+       if (ret) {
+               if (pm && pm->runtime_resume)
+                       ret = pm->runtime_resume(dev);
+               else
+                       gb_bundle_enable_all_connections(bundle);
+
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_bundle_resume(struct device *dev)
+{
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct dev_pm_ops *pm = dev->driver->pm;
+       int ret;
+
+       ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
+       if (ret)
+               return ret;
+
+       if (pm && pm->runtime_resume) {
+               ret = pm->runtime_resume(dev);
+               if (ret)
+                       return ret;
+       } else {
+               gb_bundle_enable_all_connections(bundle);
+       }
+
+       return 0;
+}
+
+static int gb_bundle_idle(struct device *dev)
+{
+       pm_runtime_mark_last_busy(dev);
+       pm_request_autosuspend(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_bundle_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
+};
+
+struct device_type greybus_bundle_type = {
+       .name =         "greybus_bundle",
+       .release =      gb_bundle_release,
+       .pm =           &gb_bundle_pm_ops,
+};
+
+/*
+ * Create a gb_bundle structure to represent a discovered
+ * bundle.  Returns a pointer to the new bundle or a null
+ * pointer on failure (invalid or duplicate bundle id, or
+ * memory exhaustion).
+ */
+struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
+                                  u8 class)
+{
+       struct gb_bundle *bundle;
+
+       if (bundle_id == BUNDLE_ID_NONE) {
+               dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
+               return NULL;
+       }
+
+       /*
+        * Reject any attempt to reuse a bundle id.  We initialize
+        * these serially, so there's no need to worry about keeping
+        * the interface bundle list locked here.
+        */
+       if (gb_bundle_find(intf, bundle_id)) {
+               dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
+               return NULL;
+       }
+
+       bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+       if (!bundle)
+               return NULL;
+
+       bundle->intf = intf;
+       bundle->id = bundle_id;
+       bundle->class = class;
+       INIT_LIST_HEAD(&bundle->connections);
+
+       bundle->dev.parent = &intf->dev;
+       bundle->dev.bus = &greybus_bus_type;
+       bundle->dev.type = &greybus_bundle_type;
+       bundle->dev.groups = bundle_groups;
+       bundle->dev.dma_mask = intf->dev.dma_mask;
+       device_initialize(&bundle->dev);
+       dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
+
+       list_add(&bundle->links, &intf->bundles);
+
+       trace_gb_bundle_create(bundle);
+
+       return bundle;
+}
+
+int gb_bundle_add(struct gb_bundle *bundle)
+{
+       int ret;
+
+       ret = device_add(&bundle->dev);
+       if (ret) {
+               dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_bundle_add(bundle);
+
+       return 0;
+}
+
+/*
+ * Tear down a previously set up bundle.
+ */
+void gb_bundle_destroy(struct gb_bundle *bundle)
+{
+       trace_gb_bundle_destroy(bundle);
+
+       if (device_is_registered(&bundle->dev))
+               device_del(&bundle->dev);
+
+       list_del(&bundle->links);
+
+       put_device(&bundle->dev);
+}
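
Taken together, bundle.c gives the core a three-step lifecycle: gb_bundle_create() allocates the device and links it to the interface, gb_bundle_add() registers it with the driver core, and gb_bundle_destroy() unwinds both. A condensed, illustrative sketch of how a caller (in mainline, the interface/manifest code) drives this; the example_* name is hypothetical:

/* Hypothetical example -- not part of this commit. */
static int example_register_bundle(struct gb_interface *intf,
				   u8 bundle_id, u8 class)
{
	struct gb_bundle *bundle;
	int ret;

	bundle = gb_bundle_create(intf, bundle_id, class);
	if (!bundle)		/* NULL on any failure */
		return -ENOMEM;

	ret = gb_bundle_add(bundle);	/* registers bundle->dev */
	if (ret) {
		/* device_del() is skipped if unregistered; final put frees */
		gb_bundle_destroy(bundle);
		return ret;
	}

	return 0;
}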
diff --git a/drivers/greybus/connection.c b/drivers/greybus/connection.c
new file mode 100644 (file)
index 0000000..fc8f57f
--- /dev/null
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus connections
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#include "greybus_trace.h"
+
+#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT    1000
+
+static void gb_connection_kref_release(struct kref *kref);
+
+static DEFINE_SPINLOCK(gb_connections_lock);
+static DEFINE_MUTEX(gb_connection_mutex);
+
+/* Caller holds gb_connection_mutex. */
+static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
+{
+       struct gb_host_device *hd = intf->hd;
+       struct gb_connection *connection;
+
+       list_for_each_entry(connection, &hd->connections, hd_links) {
+               if (connection->intf == intf &&
+                   connection->intf_cport_id == cport_id)
+                       return true;
+       }
+
+       return false;
+}
+
+static void gb_connection_get(struct gb_connection *connection)
+{
+       kref_get(&connection->kref);
+
+       trace_gb_connection_get(connection);
+}
+
+static void gb_connection_put(struct gb_connection *connection)
+{
+       trace_gb_connection_put(connection);
+
+       kref_put(&connection->kref, gb_connection_kref_release);
+}
+
+/*
+ * Returns a reference-counted pointer to the connection if found.
+ */
+static struct gb_connection *
+gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
+{
+       struct gb_connection *connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gb_connections_lock, flags);
+       list_for_each_entry(connection, &hd->connections, hd_links)
+               if (connection->hd_cport_id == cport_id) {
+                       gb_connection_get(connection);
+                       goto found;
+               }
+       connection = NULL;
+found:
+       spin_unlock_irqrestore(&gb_connections_lock, flags);
+
+       return connection;
+}
+
+/*
+ * Callback from the host driver to let us know that data has been
+ * received on the bundle.
+ */
+void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
+                      u8 *data, size_t length)
+{
+       struct gb_connection *connection;
+
+       trace_gb_hd_in(hd);
+
+       connection = gb_connection_hd_find(hd, cport_id);
+       if (!connection) {
+               dev_err(&hd->dev,
+                       "nonexistent connection (%zu bytes dropped)\n", length);
+               return;
+       }
+       gb_connection_recv(connection, data, length);
+       gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(greybus_data_rcvd);
+
+static void gb_connection_kref_release(struct kref *kref)
+{
+       struct gb_connection *connection;
+
+       connection = container_of(kref, struct gb_connection, kref);
+
+       trace_gb_connection_release(connection);
+
+       kfree(connection);
+}
+
+static void gb_connection_init_name(struct gb_connection *connection)
+{
+       u16 hd_cport_id = connection->hd_cport_id;
+       u16 cport_id = 0;
+       u8 intf_id = 0;
+
+       if (connection->intf) {
+               intf_id = connection->intf->interface_id;
+               cport_id = connection->intf_cport_id;
+       }
+
+       snprintf(connection->name, sizeof(connection->name),
+                "%u/%u:%u", hd_cport_id, intf_id, cport_id);
+}
+
+/*
+ * _gb_connection_create() - create a Greybus connection
+ * @hd:                        host device of the connection
+ * @hd_cport_id:       host-device cport id, or -1 for dynamic allocation
+ * @intf:              remote interface, or NULL for static connections
+ * @bundle:            remote-interface bundle (may be NULL)
+ * @cport_id:          remote-interface cport id, or 0 for static connections
+ * @handler:           request handler (may be NULL)
+ * @flags:             connection flags
+ *
+ * Create a Greybus connection, representing the bidirectional link
+ * between a CPort on a (local) Greybus host device and a CPort on
+ * another Greybus interface.
+ *
+ * A connection also maintains the state of operations sent over the
+ * connection.
+ *
+ * Serialised against concurrent create and destroy using the
+ * gb_connection_mutex.
+ *
+ * Return: A pointer to the new connection if successful, or an ERR_PTR
+ * otherwise.
+ */
+static struct gb_connection *
+_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
+                     struct gb_interface *intf,
+                     struct gb_bundle *bundle, int cport_id,
+                     gb_request_handler_t handler,
+                     unsigned long flags)
+{
+       struct gb_connection *connection;
+       int ret;
+
+       mutex_lock(&gb_connection_mutex);
+
+       if (intf && gb_connection_cport_in_use(intf, cport_id)) {
+               dev_err(&intf->dev, "cport %u already in use\n", cport_id);
+               ret = -EBUSY;
+               goto err_unlock;
+       }
+
+       ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
+       if (ret < 0) {
+               dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
+               goto err_unlock;
+       }
+       hd_cport_id = ret;
+
+       connection = kzalloc(sizeof(*connection), GFP_KERNEL);
+       if (!connection) {
+               ret = -ENOMEM;
+               goto err_hd_cport_release;
+       }
+
+       connection->hd_cport_id = hd_cport_id;
+       connection->intf_cport_id = cport_id;
+       connection->hd = hd;
+       connection->intf = intf;
+       connection->bundle = bundle;
+       connection->handler = handler;
+       connection->flags = flags;
+       if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
+               connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+
+       atomic_set(&connection->op_cycle, 0);
+       mutex_init(&connection->mutex);
+       spin_lock_init(&connection->lock);
+       INIT_LIST_HEAD(&connection->operations);
+
+       connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
+                                        dev_name(&hd->dev), hd_cport_id);
+       if (!connection->wq) {
+               ret = -ENOMEM;
+               goto err_free_connection;
+       }
+
+       kref_init(&connection->kref);
+
+       gb_connection_init_name(connection);
+
+       spin_lock_irq(&gb_connections_lock);
+       list_add(&connection->hd_links, &hd->connections);
+
+       if (bundle)
+               list_add(&connection->bundle_links, &bundle->connections);
+       else
+               INIT_LIST_HEAD(&connection->bundle_links);
+
+       spin_unlock_irq(&gb_connections_lock);
+
+       mutex_unlock(&gb_connection_mutex);
+
+       trace_gb_connection_create(connection);
+
+       return connection;
+
+err_free_connection:
+       kfree(connection);
+err_hd_cport_release:
+       gb_hd_cport_release(hd, hd_cport_id);
+err_unlock:
+       mutex_unlock(&gb_connection_mutex);
+
+       return ERR_PTR(ret);
+}
+
+struct gb_connection *
+gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
+                           gb_request_handler_t handler)
+{
+       return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
+                                    GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create_control(struct gb_interface *intf)
+{
+       return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
+                                    GB_CONNECTION_FLAG_CONTROL |
+                                    GB_CONNECTION_FLAG_HIGH_PRIO);
+}
+
+struct gb_connection *
+gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
+                    gb_request_handler_t handler)
+{
+       struct gb_interface *intf = bundle->intf;
+
+       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+                                    handler, 0);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create);
+
+struct gb_connection *
+gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
+                          gb_request_handler_t handler,
+                          unsigned long flags)
+{
+       struct gb_interface *intf = bundle->intf;
+
+       if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
+               flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
+
+       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
+                                    handler, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_flags);
+
+struct gb_connection *
+gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
+                              unsigned long flags)
+{
+       flags |= GB_CONNECTION_FLAG_OFFLOADED;
+
+       return gb_connection_create_flags(bundle, cport_id, NULL, flags);
+}
+EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
+
+static int gb_connection_hd_cport_enable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_enable)
+               return 0;
+
+       ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
+                                      connection->flags);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void gb_connection_hd_cport_disable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_disable)
+               return;
+
+       ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
+                       connection->name, ret);
+       }
+}
+
+static int gb_connection_hd_cport_connected(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_connected)
+               return 0;
+
+       ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_flush(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_flush)
+               return 0;
+
+       ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       size_t peer_space;
+       int ret;
+
+       if (!hd->driver->cport_quiesce)
+               return 0;
+
+       peer_space = sizeof(struct gb_operation_msg_hdr) +
+                       sizeof(struct gb_cport_shutdown_request);
+
+       if (connection->mode_switch)
+               peer_space += sizeof(struct gb_operation_msg_hdr);
+
+       ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
+                                       peer_space,
+                                       GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_connection_hd_cport_clear(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->cport_clear)
+               return 0;
+
+       ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Request the SVC to create a connection from the AP's cport to the
+ * interface's cport.
+ */
+static int
+gb_connection_svc_connection_create(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       struct gb_interface *intf;
+       u8 cport_flags;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       intf = connection->intf;
+
+       /*
+        * Enable either E2EFC or CSD, unless no flow control is requested.
+        */
+       cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
+       if (gb_connection_flow_control_disabled(connection)) {
+               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
+       } else if (gb_connection_e2efc_enabled(connection)) {
+               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
+                               GB_SVC_CPORT_FLAG_E2EFC;
+       }
+
+       ret = gb_svc_connection_create(hd->svc,
+                                      hd->svc->ap_intf_id,
+                                      connection->hd_cport_id,
+                                      intf->interface_id,
+                                      connection->intf_cport_id,
+                                      cport_flags);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to create svc connection: %d\n",
+                       connection->name, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void
+gb_connection_svc_connection_destroy(struct gb_connection *connection)
+{
+       if (gb_connection_is_static(connection))
+               return;
+
+       gb_svc_connection_destroy(connection->hd->svc,
+                                 connection->hd->svc->ap_intf_id,
+                                 connection->hd_cport_id,
+                                 connection->intf->interface_id,
+                                 connection->intf_cport_id);
+}
+
+/* Inform Interface about active CPorts */
+static int gb_connection_control_connected(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       if (gb_connection_is_control(connection))
+               return 0;
+
+       control = connection->intf->control;
+
+       ret = gb_control_connected_operation(control, cport_id);
+       if (ret) {
+               dev_err(&connection->bundle->dev,
+                       "failed to connect cport: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void
+gb_connection_control_disconnecting(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return;
+
+       control = connection->intf->control;
+
+       ret = gb_control_disconnecting_operation(control, cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to send disconnecting: %d\n",
+                       connection->name, ret);
+       }
+}
+
+static void
+gb_connection_control_disconnected(struct gb_connection *connection)
+{
+       struct gb_control *control;
+       u16 cport_id = connection->intf_cport_id;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return;
+
+       control = connection->intf->control;
+
+       if (gb_connection_is_control(connection)) {
+               if (connection->mode_switch) {
+                       ret = gb_control_mode_switch_operation(control);
+                       if (ret) {
+                               /*
+                                * Allow mode switch to time out waiting for
+                                * mailbox event.
+                                */
+                               return;
+                       }
+               }
+
+               return;
+       }
+
+       ret = gb_control_disconnected_operation(control, cport_id);
+       if (ret) {
+               dev_warn(&connection->bundle->dev,
+                        "failed to disconnect cport: %d\n", ret);
+       }
+}
+
+static int gb_connection_shutdown_operation(struct gb_connection *connection,
+                                           u8 phase)
+{
+       struct gb_cport_shutdown_request *req;
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(connection,
+                                            GB_REQUEST_TYPE_CPORT_SHUTDOWN,
+                                            sizeof(*req), 0, 0,
+                                            GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       req = operation->request->payload;
+       req->phase = phase;
+
+       ret = gb_operation_request_send_sync(operation);
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int gb_connection_cport_shutdown(struct gb_connection *connection,
+                                       u8 phase)
+{
+       struct gb_host_device *hd = connection->hd;
+       const struct gb_hd_driver *drv = hd->driver;
+       int ret;
+
+       if (gb_connection_is_static(connection))
+               return 0;
+
+       if (gb_connection_is_offloaded(connection)) {
+               if (!drv->cport_shutdown)
+                       return 0;
+
+               ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
+                                         GB_OPERATION_TIMEOUT_DEFAULT);
+       } else {
+               ret = gb_connection_shutdown_operation(connection, phase);
+       }
+
+       if (ret) {
+               dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
+                       connection->name, phase, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int
+gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
+{
+       return gb_connection_cport_shutdown(connection, 1);
+}
+
+static int
+gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
+{
+       return gb_connection_cport_shutdown(connection, 2);
+}
+
+/*
+ * Cancel all active operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to DISABLED or
+ * DISCONNECTING.
+ */
+static void gb_connection_cancel_operations(struct gb_connection *connection,
+                                           int errno)
+       __must_hold(&connection->lock)
+{
+       struct gb_operation *operation;
+
+       while (!list_empty(&connection->operations)) {
+               operation = list_last_entry(&connection->operations,
+                                           struct gb_operation, links);
+               gb_operation_get(operation);
+               spin_unlock_irq(&connection->lock);
+
+               if (gb_operation_is_incoming(operation))
+                       gb_operation_cancel_incoming(operation, errno);
+               else
+                       gb_operation_cancel(operation, errno);
+
+               gb_operation_put(operation);
+
+               spin_lock_irq(&connection->lock);
+       }
+}
+
+/*
+ * Cancel all active incoming operations on a connection.
+ *
+ * Locking: Called with connection lock held and state set to ENABLED_TX.
+ */
+static void
+gb_connection_flush_incoming_operations(struct gb_connection *connection,
+                                       int errno)
+       __must_hold(&connection->lock)
+{
+       struct gb_operation *operation;
+       bool incoming;
+
+       while (!list_empty(&connection->operations)) {
+               incoming = false;
+               list_for_each_entry(operation, &connection->operations,
+                                   links) {
+                       if (gb_operation_is_incoming(operation)) {
+                               gb_operation_get(operation);
+                               incoming = true;
+                               break;
+                       }
+               }
+
+               if (!incoming)
+                       break;
+
+               spin_unlock_irq(&connection->lock);
+
+               /* FIXME: flush, not cancel? */
+               gb_operation_cancel_incoming(operation, errno);
+               gb_operation_put(operation);
+
+               spin_lock_irq(&connection->lock);
+       }
+}
+
+/*
+ * _gb_connection_enable() - enable a connection
+ * @connection:                connection to enable
+ * @rx:                        whether to enable incoming requests
+ *
+ * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
+ * ENABLED_TX->ENABLED state transitions.
+ *
+ * Locking: Caller holds connection->mutex.
+ */
+static int _gb_connection_enable(struct gb_connection *connection, bool rx)
+{
+       int ret;
+
+       /* Handle ENABLED_TX -> ENABLED transitions. */
+       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
+               if (!(connection->handler && rx))
+                       return 0;
+
+               spin_lock_irq(&connection->lock);
+               connection->state = GB_CONNECTION_STATE_ENABLED;
+               spin_unlock_irq(&connection->lock);
+
+               return 0;
+       }
+
+       ret = gb_connection_hd_cport_enable(connection);
+       if (ret)
+               return ret;
+
+       ret = gb_connection_svc_connection_create(connection);
+       if (ret)
+               goto err_hd_cport_clear;
+
+       ret = gb_connection_hd_cport_connected(connection);
+       if (ret)
+               goto err_svc_connection_destroy;
+
+       spin_lock_irq(&connection->lock);
+       if (connection->handler && rx)
+               connection->state = GB_CONNECTION_STATE_ENABLED;
+       else
+               connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+       spin_unlock_irq(&connection->lock);
+
+       ret = gb_connection_control_connected(connection);
+       if (ret)
+               goto err_control_disconnecting;
+
+       return 0;
+
+err_control_disconnecting:
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       /* Transmit queue should already be empty. */
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_control_disconnecting(connection);
+       gb_connection_cport_shutdown_phase_1(connection);
+       gb_connection_hd_cport_quiesce(connection);
+       gb_connection_cport_shutdown_phase_2(connection);
+       gb_connection_control_disconnected(connection);
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+err_svc_connection_destroy:
+       gb_connection_svc_connection_destroy(connection);
+err_hd_cport_clear:
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+
+       return ret;
+}
+
+int gb_connection_enable(struct gb_connection *connection)
+{
+       int ret = 0;
+
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED)
+               goto out_unlock;
+
+       ret = _gb_connection_enable(connection, true);
+       if (!ret)
+               trace_gb_connection_enable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable);
+
+int gb_connection_enable_tx(struct gb_connection *connection)
+{
+       int ret = 0;
+
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
+               goto out_unlock;
+
+       ret = _gb_connection_enable(connection, false);
+       if (!ret)
+               trace_gb_connection_enable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
+
+void gb_connection_disable_rx(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       spin_lock_irq(&connection->lock);
+       if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+               spin_unlock_irq(&connection->lock);
+               goto out_unlock;
+       }
+       connection->state = GB_CONNECTION_STATE_ENABLED_TX;
+       gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       trace_gb_connection_disable(connection);
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
+
+void gb_connection_mode_switch_prepare(struct gb_connection *connection)
+{
+       connection->mode_switch = true;
+}
+
+void gb_connection_mode_switch_complete(struct gb_connection *connection)
+{
+       gb_connection_svc_connection_destroy(connection);
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+
+       connection->mode_switch = false;
+}
+
+void gb_connection_disable(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED)
+               goto out_unlock;
+
+       trace_gb_connection_disable(connection);
+
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_control_disconnecting(connection);
+       gb_connection_cport_shutdown_phase_1(connection);
+       gb_connection_hd_cport_quiesce(connection);
+       gb_connection_cport_shutdown_phase_2(connection);
+       gb_connection_control_disconnected(connection);
+
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+
+       /* control-connection tear down is deferred when mode switching */
+       if (!connection->mode_switch) {
+               gb_connection_svc_connection_destroy(connection);
+               gb_connection_hd_cport_clear(connection);
+
+               gb_connection_hd_cport_disable(connection);
+       }
+
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable);
+
+/* Disable a connection without communicating with the remote end. */
+void gb_connection_disable_forced(struct gb_connection *connection)
+{
+       mutex_lock(&connection->mutex);
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED)
+               goto out_unlock;
+
+       trace_gb_connection_disable(connection);
+
+       spin_lock_irq(&connection->lock);
+       connection->state = GB_CONNECTION_STATE_DISABLED;
+       gb_connection_cancel_operations(connection, -ESHUTDOWN);
+       spin_unlock_irq(&connection->lock);
+
+       gb_connection_hd_cport_flush(connection);
+
+       gb_connection_svc_connection_destroy(connection);
+       gb_connection_hd_cport_clear(connection);
+
+       gb_connection_hd_cport_disable(connection);
+out_unlock:
+       mutex_unlock(&connection->mutex);
+}
+EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
+
+/* Caller must have disabled the connection before destroying it. */
+void gb_connection_destroy(struct gb_connection *connection)
+{
+       if (!connection)
+               return;
+
+       if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
+               gb_connection_disable(connection);
+
+       mutex_lock(&gb_connection_mutex);
+
+       spin_lock_irq(&gb_connections_lock);
+       list_del(&connection->bundle_links);
+       list_del(&connection->hd_links);
+       spin_unlock_irq(&gb_connections_lock);
+
+       destroy_workqueue(connection->wq);
+
+       gb_hd_cport_release(connection->hd, connection->hd_cport_id);
+       connection->hd_cport_id = CPORT_ID_BAD;
+
+       mutex_unlock(&gb_connection_mutex);
+
+       gb_connection_put(connection);
+}
+EXPORT_SYMBOL_GPL(gb_connection_destroy);
+
+void gb_connection_latency_tag_enable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->latency_tag_enable)
+               return;
+
+       ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to enable latency tag: %d\n",
+                       connection->name, ret);
+       }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
+
+void gb_connection_latency_tag_disable(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+       int ret;
+
+       if (!hd->driver->latency_tag_disable)
+               return;
+
+       ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to disable latency tag: %d\n",
+                       connection->name, ret);
+       }
+}
+EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
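
From a class driver's perspective, connection.c boils down to create/enable in the probe path and disable/destroy on teardown, with incoming requests dispatched to the handler passed at creation time. An illustrative sketch (hypothetical example_* names; a real driver takes cport_id from the bundle's CPort descriptors):

/* Hypothetical example -- not part of this commit. */
static int example_request_handler(struct gb_operation *op)
{
	/* Handle an incoming request; return 0 or a negative errno. */
	return -EINVAL;
}

static int example_setup(struct gb_bundle *bundle, u16 cport_id)
{
	struct gb_connection *connection;
	int ret;

	connection = gb_connection_create(bundle, cport_id,
					  example_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	ret = gb_connection_enable(connection);	/* DISABLED -> ENABLED */
	if (ret) {
		gb_connection_destroy(connection);
		return ret;
	}

	/* ...exchange operations, e.g. via gb_operation_sync()... */

	gb_connection_disable(connection);	/* required before destroy */
	gb_connection_destroy(connection);

	return 0;
}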
diff --git a/drivers/greybus/control.c b/drivers/greybus/control.c
new file mode 100644 (file)
index 0000000..359a258
--- /dev/null
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus CPort control protocol.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+
+/* Highest control-protocol version supported */
+#define GB_CONTROL_VERSION_MAJOR       0
+#define GB_CONTROL_VERSION_MINOR       1
+
+static int gb_control_get_version(struct gb_control *control)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_control_version_request request;
+       struct gb_control_version_response response;
+       int ret;
+
+       request.major = GB_CONTROL_VERSION_MAJOR;
+       request.minor = GB_CONTROL_VERSION_MINOR;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_VERSION,
+                               &request, sizeof(request), &response,
+                               sizeof(response));
+       if (ret) {
+               dev_err(&intf->dev,
+                       "failed to get control-protocol version: %d\n",
+                       ret);
+               return ret;
+       }
+
+       if (response.major > request.major) {
+               dev_err(&intf->dev,
+                       "unsupported major control-protocol version (%u > %u)\n",
+                       response.major, request.major);
+               return -ENOTSUPP;
+       }
+
+       control->protocol_major = response.major;
+       control->protocol_minor = response.minor;
+
+       dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
+               response.minor);
+
+       return 0;
+}
+
+static int gb_control_get_bundle_version(struct gb_control *control,
+                                        struct gb_bundle *bundle)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_control_bundle_version_request request;
+       struct gb_control_bundle_version_response response;
+       int ret;
+
+       request.bundle_id = bundle->id;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_VERSION,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&intf->dev,
+                       "failed to get bundle %u class version: %d\n",
+                       bundle->id, ret);
+               return ret;
+       }
+
+       bundle->class_major = response.major;
+       bundle->class_minor = response.minor;
+
+       dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
+               response.major, response.minor);
+
+       return 0;
+}
+
+int gb_control_get_bundle_versions(struct gb_control *control)
+{
+       struct gb_interface *intf = control->connection->intf;
+       struct gb_bundle *bundle;
+       int ret;
+
+       if (!control->has_bundle_version)
+               return 0;
+
+       list_for_each_entry(bundle, &intf->bundles, links) {
+               ret = gb_control_get_bundle_version(control, bundle);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Get Manifest's size from the interface */
+int gb_control_get_manifest_size_operation(struct gb_interface *intf)
+{
+       struct gb_control_get_manifest_size_response response;
+       struct gb_connection *connection = intf->control->connection;
+       int ret;
+
+       ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
+                               NULL, 0, &response, sizeof(response));
+       if (ret) {
+               dev_err(&connection->intf->dev,
+                       "failed to get manifest size: %d\n", ret);
+               return ret;
+       }
+
+       return le16_to_cpu(response.size);
+}
+
+/* Reads Manifest from the interface */
+int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
+                                     size_t size)
+{
+       struct gb_connection *connection = intf->control->connection;
+
+       return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
+                               NULL, 0, manifest, size);
+}
+
+int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
+{
+       struct gb_control_connected_request request;
+
+       request.cport_id = cpu_to_le16(cport_id);
+       return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
+                                &request, sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
+{
+       struct gb_control_disconnected_request request;
+
+       request.cport_id = cpu_to_le16(cport_id);
+       return gb_operation_sync(control->connection,
+                                GB_CONTROL_TYPE_DISCONNECTED, &request,
+                                sizeof(request), NULL, 0);
+}
+
+int gb_control_disconnecting_operation(struct gb_control *control,
+                                      u16 cport_id)
+{
+       struct gb_control_disconnecting_request *request;
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(control->connection,
+                                            GB_CONTROL_TYPE_DISCONNECTING,
+                                            sizeof(*request), 0, 0,
+                                            GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       request = operation->request->payload;
+       request->cport_id = cpu_to_le16(cport_id);
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret) {
+               dev_err(&control->dev, "failed to send disconnecting: %d\n",
+                       ret);
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+int gb_control_mode_switch_operation(struct gb_control *control)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       operation = gb_operation_create_core(control->connection,
+                                            GB_CONTROL_TYPE_MODE_SWITCH,
+                                            0, 0,
+                                            GB_OPERATION_FLAG_UNIDIRECTIONAL,
+                                            GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       ret = gb_operation_request_send_sync(operation);
+       if (ret)
+               dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+static int gb_control_bundle_pm_status_map(u8 status)
+{
+       switch (status) {
+       case GB_CONTROL_BUNDLE_PM_INVAL:
+               return -EINVAL;
+       case GB_CONTROL_BUNDLE_PM_BUSY:
+               return -EBUSY;
+       case GB_CONTROL_BUNDLE_PM_NA:
+               return -ENOMSG;
+       case GB_CONTROL_BUNDLE_PM_FAIL:
+       default:
+               return -EREMOTEIO;
+       }
+}
+
+int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
+                       bundle_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
+                       bundle_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to resume bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send bundle %u deactivate: %d\n", bundle_id,
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
+{
+       struct gb_control_bundle_pm_request request;
+       struct gb_control_bundle_pm_response response;
+       int ret;
+
+       if (!control->has_bundle_activate)
+               return 0;
+
+       request.bundle_id = bundle_id;
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
+                               sizeof(request), &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send bundle %u activate: %d\n", bundle_id,
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
+               dev_err(&control->dev, "failed to activate bundle %u: %d\n",
+                       bundle_id, response.status);
+               return gb_control_bundle_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+static int gb_control_interface_pm_status_map(u8 status)
+{
+       switch (status) {
+       case GB_CONTROL_INTF_PM_BUSY:
+               return -EBUSY;
+       case GB_CONTROL_INTF_PM_NA:
+               return -ENOMSG;
+       default:
+               return -EREMOTEIO;
+       }
+}
+
+int gb_control_interface_suspend_prepare(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send interface suspend prepare: %d\n", ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while preparing suspend: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_interface_deactivate_prepare(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
+                               0, &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+int gb_control_interface_hibernate_abort(struct gb_control *control)
+{
+       struct gb_control_intf_pm_response response;
+       int ret;
+
+       ret = gb_operation_sync(control->connection,
+                               GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to send interface aborting hibernate: %d\n",
+                       ret);
+               return ret;
+       }
+
+       if (response.status != GB_CONTROL_INTF_PM_OK) {
+               dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
+                       response.status);
+               return gb_control_interface_pm_status_map(response.status);
+       }
+
+       return 0;
+}
+
+static ssize_t vendor_string_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
+}
+static DEVICE_ATTR_RO(vendor_string);
+
+static ssize_t product_string_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
+}
+static DEVICE_ATTR_RO(product_string);
+
+static struct attribute *control_attrs[] = {
+       &dev_attr_vendor_string.attr,
+       &dev_attr_product_string.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(control);
+
+static void gb_control_release(struct device *dev)
+{
+       struct gb_control *control = to_gb_control(dev);
+
+       gb_connection_destroy(control->connection);
+
+       kfree(control->vendor_string);
+       kfree(control->product_string);
+
+       kfree(control);
+}
+
+struct device_type greybus_control_type = {
+       .name =         "greybus_control",
+       .release =      gb_control_release,
+};
+
+struct gb_control *gb_control_create(struct gb_interface *intf)
+{
+       struct gb_connection *connection;
+       struct gb_control *control;
+
+       control = kzalloc(sizeof(*control), GFP_KERNEL);
+       if (!control)
+               return ERR_PTR(-ENOMEM);
+
+       control->intf = intf;
+
+       connection = gb_connection_create_control(intf);
+       if (IS_ERR(connection)) {
+               dev_err(&intf->dev,
+                       "failed to create control connection: %ld\n",
+                       PTR_ERR(connection));
+               kfree(control);
+               return ERR_CAST(connection);
+       }
+
+       control->connection = connection;
+
+       control->dev.parent = &intf->dev;
+       control->dev.bus = &greybus_bus_type;
+       control->dev.type = &greybus_control_type;
+       control->dev.groups = control_groups;
+       control->dev.dma_mask = intf->dev.dma_mask;
+       device_initialize(&control->dev);
+       dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
+
+       gb_connection_set_data(control->connection, control);
+
+       return control;
+}
+
+int gb_control_enable(struct gb_control *control)
+{
+       int ret;
+
+       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+       ret = gb_connection_enable_tx(control->connection);
+       if (ret) {
+               dev_err(&control->connection->intf->dev,
+                       "failed to enable control connection: %d\n",
+                       ret);
+               return ret;
+       }
+
+       ret = gb_control_get_version(control);
+       if (ret)
+               goto err_disable_connection;
+
+       if (control->protocol_major > 0 || control->protocol_minor > 1)
+               control->has_bundle_version = true;
+
+       /* FIXME: use protocol version instead */
+       if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
+               control->has_bundle_activate = true;
+
+       return 0;
+
+err_disable_connection:
+       gb_connection_disable(control->connection);
+
+       return ret;
+}
+
+void gb_control_disable(struct gb_control *control)
+{
+       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
+
+       if (control->intf->disconnected)
+               gb_connection_disable_forced(control->connection);
+       else
+               gb_connection_disable(control->connection);
+}
+
+int gb_control_suspend(struct gb_control *control)
+{
+       gb_connection_disable(control->connection);
+
+       return 0;
+}
+
+int gb_control_resume(struct gb_control *control)
+{
+       int ret;
+
+       ret = gb_connection_enable_tx(control->connection);
+       if (ret) {
+               dev_err(&control->connection->intf->dev,
+                       "failed to enable control connection: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+int gb_control_add(struct gb_control *control)
+{
+       int ret;
+
+       ret = device_add(&control->dev);
+       if (ret) {
+               dev_err(&control->dev,
+                       "failed to register control device: %d\n",
+                       ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void gb_control_del(struct gb_control *control)
+{
+       if (device_is_registered(&control->dev))
+               device_del(&control->dev);
+}
+
+struct gb_control *gb_control_get(struct gb_control *control)
+{
+       get_device(&control->dev);
+
+       return control;
+}
+
+void gb_control_put(struct gb_control *control)
+{
+       put_device(&control->dev);
+}
+
+void gb_control_mode_switch_prepare(struct gb_control *control)
+{
+       gb_connection_mode_switch_prepare(control->connection);
+}
+
+void gb_control_mode_switch_complete(struct gb_control *control)
+{
+       gb_connection_mode_switch_complete(control->connection);
+}
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
new file mode 100644 (file)
index 0000000..e546c64
--- /dev/null
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus "Core"
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define CREATE_TRACE_POINTS
+#include <linux/greybus.h>
+#include "greybus_trace.h"
+
+#define GB_BUNDLE_AUTOSUSPEND_MS       3000
+
+/* Allow greybus to be disabled at boot if needed */
+static bool nogreybus;
+#ifdef MODULE
+module_param(nogreybus, bool, 0444);
+#else
+core_param(nogreybus, nogreybus, bool, 0444);
+#endif
+int greybus_disabled(void)
+{
+       return nogreybus;
+}
+EXPORT_SYMBOL_GPL(greybus_disabled);
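
[Editor's note] In practice this lets greybus be kept from binding at boot via the nogreybus kernel command-line parameter when built in; when built as a module it is presumably set as a module parameter (e.g. greybus.nogreybus=1, depending on the module name the Makefile assigns).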
+
+static bool greybus_match_one_id(struct gb_bundle *bundle,
+                                const struct greybus_bundle_id *id)
+{
+       if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
+           (id->vendor != bundle->intf->vendor_id))
+               return false;
+
+       if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
+           (id->product != bundle->intf->product_id))
+               return false;
+
+       if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
+           (id->class != bundle->class))
+               return false;
+
+       return true;
+}
+
+static const struct greybus_bundle_id *
+greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
+{
+       if (!id)
+               return NULL;
+
+       for (; id->vendor || id->product || id->class || id->driver_info;
+                                                                       id++) {
+               if (greybus_match_one_id(bundle, id))
+                       return id;
+       }
+
+       return NULL;
+}
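
[Editor's note] As a minimal sketch of how a driver feeds this matcher (the vendor and product values below are hypothetical), an id table lists the fields selected by match_flags and ends with an all-zero terminator, which is what stops the loop in greybus_match_id():

static const struct greybus_bundle_id example_bundle_ids[] = {
	{
		.match_flags	= GREYBUS_ID_MATCH_VENDOR |
				  GREYBUS_ID_MATCH_PRODUCT,
		.vendor		= 0x1234,
		.product	= 0x5678,
	},
	{ }	/* terminating entry */
};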
+
+static int greybus_match_device(struct device *dev, struct device_driver *drv)
+{
+       struct greybus_driver *driver = to_greybus_driver(drv);
+       struct gb_bundle *bundle;
+       const struct greybus_bundle_id *id;
+
+       if (!is_gb_bundle(dev))
+               return 0;
+
+       bundle = to_gb_bundle(dev);
+
+       id = greybus_match_id(bundle, driver->id_table);
+       if (id)
+               return 1;
+       /* FIXME - Dynamic ids? */
+       return 0;
+}
+
+static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct gb_host_device *hd;
+       struct gb_module *module = NULL;
+       struct gb_interface *intf = NULL;
+       struct gb_control *control = NULL;
+       struct gb_bundle *bundle = NULL;
+       struct gb_svc *svc = NULL;
+
+       if (is_gb_host_device(dev)) {
+               hd = to_gb_host_device(dev);
+       } else if (is_gb_module(dev)) {
+               module = to_gb_module(dev);
+               hd = module->hd;
+       } else if (is_gb_interface(dev)) {
+               intf = to_gb_interface(dev);
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_control(dev)) {
+               control = to_gb_control(dev);
+               intf = control->intf;
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_bundle(dev)) {
+               bundle = to_gb_bundle(dev);
+               intf = bundle->intf;
+               module = intf->module;
+               hd = intf->hd;
+       } else if (is_gb_svc(dev)) {
+               svc = to_gb_svc(dev);
+               hd = svc->hd;
+       } else {
+               dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
+               return -EINVAL;
+       }
+
+       if (add_uevent_var(env, "BUS=%u", hd->bus_id))
+               return -ENOMEM;
+
+       if (module) {
+               if (add_uevent_var(env, "MODULE=%u", module->module_id))
+                       return -ENOMEM;
+       }
+
+       if (intf) {
+               if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
+                       return -ENOMEM;
+               if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
+                                  intf->vendor_id, intf->product_id))
+                       return -ENOMEM;
+       }
+
+       if (bundle) {
+               /*
+                * FIXME: add a uevent that can "load" a bundle type. This is
+                * what we need to bind a driver to, so use the info in gmod
+                * here as well.
+                */
+
+               if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
+                       return -ENOMEM;
+               if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
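
[Editor's note] For a bundle device the resulting uevent environment therefore carries the whole ancestry; with illustrative values it might read BUS=1 MODULE=2 INTERFACE=2 GREYBUS_ID=00000126/00001000 BUNDLE=1 BUNDLE_CLASS=0a.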
+
+static void greybus_shutdown(struct device *dev)
+{
+       if (is_gb_host_device(dev)) {
+               struct gb_host_device *hd;
+
+               hd = to_gb_host_device(dev);
+               gb_hd_shutdown(hd);
+       }
+}
+
+struct bus_type greybus_bus_type = {
+       .name =         "greybus",
+       .match =        greybus_match_device,
+       .uevent =       greybus_uevent,
+       .shutdown =     greybus_shutdown,
+};
+
+static int greybus_probe(struct device *dev)
+{
+       struct greybus_driver *driver = to_greybus_driver(dev->driver);
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       const struct greybus_bundle_id *id;
+       int retval;
+
+       /* match id */
+       id = greybus_match_id(bundle, driver->id_table);
+       if (!id)
+               return -ENODEV;
+
+       retval = pm_runtime_get_sync(&bundle->intf->dev);
+       if (retval < 0) {
+               pm_runtime_put_noidle(&bundle->intf->dev);
+               return retval;
+       }
+
+       retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
+       if (retval) {
+               pm_runtime_put(&bundle->intf->dev);
+               return retval;
+       }
+
+       /*
+        * Unbound bundle devices are always deactivated. During probe,
+        * runtime PM is enabled and marked active and the usage count is
+        * incremented. If the driver supports runtime PM, it should call
+        * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
+        * in its remove routine.
+        */
+       pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       retval = driver->probe(bundle, id);
+       if (retval) {
+               /*
+                * Catch buggy drivers that fail to destroy their connections.
+                */
+               WARN_ON(!list_empty(&bundle->connections));
+
+               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+               pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+               pm_runtime_put_noidle(dev);
+               pm_runtime_dont_use_autosuspend(dev);
+               pm_runtime_put(&bundle->intf->dev);
+
+               return retval;
+       }
+
+       pm_runtime_put(&bundle->intf->dev);
+
+       return 0;
+}
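
[Editor's note] A driver-side counterpart to the runtime-PM comment above might look as follows; gb_foo_probe() and gb_foo_disconnect() are hypothetical, and only the PM calls are the point of the sketch:

static int gb_foo_probe(struct gb_bundle *bundle,
			const struct greybus_bundle_id *id)
{
	/* ... create and enable connections, allocate driver state ... */

	/* Drop the usage count taken by the core so the bundle may suspend. */
	pm_runtime_put(&bundle->dev);

	return 0;
}

static void gb_foo_disconnect(struct gb_bundle *bundle)
{
	/* Rebalance the count dropped in probe before tearing down. */
	pm_runtime_get_sync(&bundle->dev);

	/* ... disable and destroy connections, free driver state ... */
}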
+
+static int greybus_remove(struct device *dev)
+{
+       struct greybus_driver *driver = to_greybus_driver(dev->driver);
+       struct gb_bundle *bundle = to_gb_bundle(dev);
+       struct gb_connection *connection;
+       int retval;
+
+       retval = pm_runtime_get_sync(dev);
+       if (retval < 0)
+               dev_err(dev, "failed to resume bundle: %d\n", retval);
+
+       /*
+        * Disable (non-offloaded) connections early in case the interface is
+        * already gone to avoid unnecessary operation timeouts during
+        * driver disconnect. Otherwise, only disable incoming requests.
+        */
+       list_for_each_entry(connection, &bundle->connections, bundle_links) {
+               if (gb_connection_is_offloaded(connection))
+                       continue;
+
+               if (bundle->intf->disconnected)
+                       gb_connection_disable_forced(connection);
+               else
+                       gb_connection_disable_rx(connection);
+       }
+
+       driver->disconnect(bundle);
+
+       /* Catch buggy drivers that fail to destroy their connections. */
+       WARN_ON(!list_empty(&bundle->connections));
+
+       if (!bundle->intf->disconnected)
+               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
+
+       pm_runtime_put_noidle(dev);
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+       pm_runtime_put_noidle(dev);
+
+       return 0;
+}
+
+int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
+                           const char *mod_name)
+{
+       int retval;
+
+       if (greybus_disabled())
+               return -ENODEV;
+
+       driver->driver.bus = &greybus_bus_type;
+       driver->driver.name = driver->name;
+       driver->driver.probe = greybus_probe;
+       driver->driver.remove = greybus_remove;
+       driver->driver.owner = owner;
+       driver->driver.mod_name = mod_name;
+
+       retval = driver_register(&driver->driver);
+       if (retval)
+               return retval;
+
+       pr_info("registered new driver %s\n", driver->name);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(greybus_register_driver);
+
+void greybus_deregister_driver(struct greybus_driver *driver)
+{
+       driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(greybus_deregister_driver);
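
[Editor's note] Tying the pieces together, a minimal bundle driver registers through the helpers above. Everything named gb_foo is hypothetical and the class value is a placeholder; GREYBUS_DEVICE_CLASS() and module_greybus_driver() come from the greybus headers moved by this series:

static const struct greybus_bundle_id gb_foo_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(0x42) },	/* placeholder class */
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_foo_id_table);

static struct greybus_driver gb_foo_driver = {
	.name		= "gb_foo",
	.probe		= gb_foo_probe,
	.disconnect	= gb_foo_disconnect,
	.id_table	= gb_foo_id_table,
};
module_greybus_driver(gb_foo_driver);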
+
+static int __init gb_init(void)
+{
+       int retval;
+
+       if (greybus_disabled())
+               return -ENODEV;
+
+       BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
+
+       gb_debugfs_init();
+
+       retval = bus_register(&greybus_bus_type);
+       if (retval) {
+               pr_err("bus_register failed (%d)\n", retval);
+               goto error_bus;
+       }
+
+       retval = gb_hd_init();
+       if (retval) {
+               pr_err("gb_hd_init failed (%d)\n", retval);
+               goto error_hd;
+       }
+
+       retval = gb_operation_init();
+       if (retval) {
+               pr_err("gb_operation_init failed (%d)\n", retval);
+               goto error_operation;
+       }
+       return 0;       /* Success */
+
+error_operation:
+       gb_hd_exit();
+error_hd:
+       bus_unregister(&greybus_bus_type);
+error_bus:
+       gb_debugfs_cleanup();
+
+       return retval;
+}
+module_init(gb_init);
+
+static void __exit gb_exit(void)
+{
+       gb_operation_exit();
+       gb_hd_exit();
+       bus_unregister(&greybus_bus_type);
+       gb_debugfs_cleanup();
+       tracepoint_synchronize_unregister();
+}
+module_exit(gb_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/greybus/debugfs.c b/drivers/greybus/debugfs.c
new file mode 100644 (file)
index 0000000..e102d7b
--- /dev/null
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus debugfs code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/greybus.h>
+
+static struct dentry *gb_debug_root;
+
+void __init gb_debugfs_init(void)
+{
+       gb_debug_root = debugfs_create_dir("greybus", NULL);
+}
+
+void gb_debugfs_cleanup(void)
+{
+       debugfs_remove_recursive(gb_debug_root);
+       gb_debug_root = NULL;
+}
+
+struct dentry *gb_debugfs_get(void)
+{
+       return gb_debug_root;
+}
+EXPORT_SYMBOL_GPL(gb_debugfs_get);
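
[Editor's note] Client code can hang its own entries off this shared root; a minimal sketch ("timeout_count" is a hypothetical name):

static u32 timeout_count;

static void example_debugfs_setup(void)
{
	/* Creates <debugfs>/greybus/timeout_count backed by the variable. */
	debugfs_create_u32("timeout_count", 0644, gb_debugfs_get(),
			   &timeout_count);
}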
diff --git a/drivers/greybus/greybus_trace.h b/drivers/greybus/greybus_trace.h
new file mode 100644 (file)
index 0000000..1bc9f12
--- /dev/null
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Greybus driver and device API
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM greybus
+
+#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GREYBUS_H
+
+#include <linux/tracepoint.h>
+
+struct gb_message;
+struct gb_operation;
+struct gb_connection;
+struct gb_bundle;
+struct gb_host_device;
+
+DECLARE_EVENT_CLASS(gb_message,
+
+       TP_PROTO(struct gb_message *message),
+
+       TP_ARGS(message),
+
+       TP_STRUCT__entry(
+               __field(u16, size)
+               __field(u16, operation_id)
+               __field(u8, type)
+               __field(u8, result)
+       ),
+
+       TP_fast_assign(
+               __entry->size = le16_to_cpu(message->header->size);
+               __entry->operation_id =
+                       le16_to_cpu(message->header->operation_id);
+               __entry->type = message->header->type;
+               __entry->result = message->header->result;
+       ),
+
+       TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
+                 __entry->size, __entry->operation_id,
+                 __entry->type, __entry->result)
+);
+
+#define DEFINE_MESSAGE_EVENT(name)                                     \
+               DEFINE_EVENT(gb_message, name,                          \
+                               TP_PROTO(struct gb_message *message),   \
+                               TP_ARGS(message))
+
+/*
+ * Occurs immediately before calling a host device's message_send()
+ * method.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_send);
+
+/*
+ * Occurs after an incoming request message has been received
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_request);
+
+/*
+ * Occurs after an incoming response message has been received,
+ * after its matching request has been found.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_recv_response);
+
+/*
+ * Occurs after an operation has been canceled, possibly before the
+ * cancellation is complete.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);
+
+/*
+ * Occurs when an incoming request is cancelled; if the response has
+ * been queued for sending, this occurs after it is sent.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);
+
+/*
+ * Occurs in the host driver message_send() function just prior to
+ * handing off the data to be processed by hardware.
+ */
+DEFINE_MESSAGE_EVENT(gb_message_submit);
+
+#undef DEFINE_MESSAGE_EVENT
+
+DECLARE_EVENT_CLASS(gb_operation,
+
+       TP_PROTO(struct gb_operation *operation),
+
+       TP_ARGS(operation),
+
+       TP_STRUCT__entry(
+               __field(u16, cport_id)  /* CPort of HD side of connection */
+               __field(u16, id)        /* Operation ID */
+               __field(u8, type)
+               __field(unsigned long, flags)
+               __field(int, active)
+               __field(int, waiters)
+               __field(int, errno)
+       ),
+
+       TP_fast_assign(
+               __entry->cport_id = operation->connection->hd_cport_id;
+               __entry->id = operation->id;
+               __entry->type = operation->type;
+               __entry->flags = operation->flags;
+               __entry->active = operation->active;
+               __entry->waiters = atomic_read(&operation->waiters);
+               __entry->errno = operation->errno;
+       ),
+
+       TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
+                 __entry->id, __entry->cport_id, __entry->type, __entry->flags,
+                 __entry->active, __entry->waiters, __entry->errno)
+);
+
+#define DEFINE_OPERATION_EVENT(name)                                   \
+               DEFINE_EVENT(gb_operation, name,                        \
+                               TP_PROTO(struct gb_operation *operation), \
+                               TP_ARGS(operation))
+
+/*
+ * Occurs after a new operation for an outgoing request has been
+ * successfully created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create);
+
+/*
+ * Occurs after a new core operation has been created.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_core);
+
+/*
+ * Occurs after a new operation for an incoming request has been
+ * successfully created and initialized.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
+
+/*
+ * Occurs when the last reference to an operation has been dropped,
+ * prior to freeing resources.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_destroy);
+
+/*
+ * Occurs when an operation has been marked active, after updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_get_active);
+
+/*
+ * Occurs when an operation is marked inactive, before updating
+ * its active count.
+ */
+DEFINE_OPERATION_EVENT(gb_operation_put_active);
+
+#undef DEFINE_OPERATION_EVENT
+
+DECLARE_EVENT_CLASS(gb_connection,
+
+       TP_PROTO(struct gb_connection *connection),
+
+       TP_ARGS(connection),
+
+       TP_STRUCT__entry(
+               __field(int, hd_bus_id)
+               __field(u8, bundle_id)
+               /* name contains "hd_cport_id/intf_id:cport_id" */
+               __dynamic_array(char, name, sizeof(connection->name))
+               __field(enum gb_connection_state, state)
+               __field(unsigned long, flags)
+       ),
+
+       TP_fast_assign(
+               __entry->hd_bus_id = connection->hd->bus_id;
+               __entry->bundle_id = connection->bundle ?
+                               connection->bundle->id : BUNDLE_ID_NONE;
+               memcpy(__get_str(name), connection->name,
+                                       sizeof(connection->name));
+               __entry->state = connection->state;
+               __entry->flags = connection->flags;
+       ),
+
+       TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
+                 __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
+                 (unsigned int)__entry->state, __entry->flags)
+);
+
+#define DEFINE_CONNECTION_EVENT(name)                                  \
+               DEFINE_EVENT(gb_connection, name,                       \
+                               TP_PROTO(struct gb_connection *connection), \
+                               TP_ARGS(connection))
+
+/*
+ * Occurs after a new connection is successfully created.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_create);
+
+/*
+ * Occurs when the last reference to a connection has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_release);
+
+/*
+ * Occurs when a new reference to a connection is added, currently
+ * only when a message over the connection is received.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_get);
+
+/*
+ * Occurs when a reference to a connection is dropped, after a
+ * received message has been handled, or when the connection is
+ * destroyed.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_put);
+
+/*
+ * Occurs when a request to enable a connection is made, either for
+ * transmit only, or for both transmit and receive.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_enable);
+
+/*
+ * Occurs when a request to disable a connection is made, either for
+ * receive only, or for both transmit and receive.  Also occurs when
+ * a request to forcefully disable a connection is made.
+ */
+DEFINE_CONNECTION_EVENT(gb_connection_disable);
+
+#undef DEFINE_CONNECTION_EVENT
+
+DECLARE_EVENT_CLASS(gb_bundle,
+
+       TP_PROTO(struct gb_bundle *bundle),
+
+       TP_ARGS(bundle),
+
+       TP_STRUCT__entry(
+               __field(u8, intf_id)
+               __field(u8, id)
+               __field(u8, class)
+               __field(size_t, num_cports)
+       ),
+
+       TP_fast_assign(
+               __entry->intf_id = bundle->intf->interface_id;
+               __entry->id = bundle->id;
+               __entry->class = bundle->class;
+               __entry->num_cports = bundle->num_cports;
+       ),
+
+       TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
+                 __entry->intf_id, __entry->id, __entry->class,
+                 __entry->num_cports)
+);
+
+#define DEFINE_BUNDLE_EVENT(name)                                      \
+               DEFINE_EVENT(gb_bundle, name,                   \
+                               TP_PROTO(struct gb_bundle *bundle), \
+                               TP_ARGS(bundle))
+
+/*
+ * Occurs after a new bundle is successfully created.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_create);
+
+/*
+ * Occurs when the last reference to a bundle has been dropped,
+ * before its resources are freed.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_release);
+
+/*
+ * Occurs when a bundle is added to an interface when the interface
+ * is enabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_add);
+
+/*
+ * Occurs when a registered bundle gets destroyed, normally at the
+ * time an interface is disabled.
+ */
+DEFINE_BUNDLE_EVENT(gb_bundle_destroy);
+
+#undef DEFINE_BUNDLE_EVENT
+
+DECLARE_EVENT_CLASS(gb_interface,
+
+       TP_PROTO(struct gb_interface *intf),
+
+       TP_ARGS(intf),
+
+       TP_STRUCT__entry(
+               __field(u8, module_id)
+               __field(u8, id)         /* Interface id */
+               __field(u8, device_id)
+               __field(int, disconnected)      /* bool */
+               __field(int, ejected)           /* bool */
+               __field(int, active)            /* bool */
+               __field(int, enabled)           /* bool */
+               __field(int, mode_switch)       /* bool */
+       ),
+
+       TP_fast_assign(
+               __entry->module_id = intf->module->module_id;
+               __entry->id = intf->interface_id;
+               __entry->device_id = intf->device_id;
+               __entry->disconnected = intf->disconnected;
+               __entry->ejected = intf->ejected;
+               __entry->active = intf->active;
+               __entry->enabled = intf->enabled;
+               __entry->mode_switch = intf->mode_switch;
+       ),
+
+       TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
+               __entry->id, __entry->device_id, __entry->module_id,
+               __entry->disconnected, __entry->ejected, __entry->active,
+               __entry->enabled, __entry->mode_switch)
+);
+
+#define DEFINE_INTERFACE_EVENT(name)                                   \
+               DEFINE_EVENT(gb_interface, name,                        \
+                               TP_PROTO(struct gb_interface *intf),    \
+                               TP_ARGS(intf))
+
+/*
+ * Occurs after a new interface is successfully created.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_create);
+
+/*
+ * Occurs after the last reference to an interface has been dropped.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_release);
+
+/*
+ * Occurs after an interface has been registered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_add);
+
+/*
+ * Occurs when a registered interface gets deregistered.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_del);
+
+/*
+ * Occurs when a registered interface has been successfully
+ * activated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_activate);
+
+/*
+ * Occurs when an activated interface is being deactivated.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_deactivate);
+
+/*
+ * Occurs when an interface has been successfully enabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_enable);
+
+/*
+ * Occurs when an enabled interface is being disabled.
+ */
+DEFINE_INTERFACE_EVENT(gb_interface_disable);
+
+#undef DEFINE_INTERFACE_EVENT
+
+DECLARE_EVENT_CLASS(gb_module,
+
+       TP_PROTO(struct gb_module *module),
+
+       TP_ARGS(module),
+
+       TP_STRUCT__entry(
+               __field(int, hd_bus_id)
+               __field(u8, module_id)
+               __field(size_t, num_interfaces)
+               __field(int, disconnected)      /* bool */
+       ),
+
+       TP_fast_assign(
+               __entry->hd_bus_id = module->hd->bus_id;
+               __entry->module_id = module->module_id;
+               __entry->num_interfaces = module->num_interfaces;
+               __entry->disconnected = module->disconnected;
+       ),
+
+       TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
+               __entry->hd_bus_id, __entry->module_id,
+               __entry->num_interfaces, __entry->disconnected)
+);
+
+#define DEFINE_MODULE_EVENT(name)                                      \
+               DEFINE_EVENT(gb_module, name,                           \
+                               TP_PROTO(struct gb_module *module),     \
+                               TP_ARGS(module))
+
+/*
+ * Occurs after a new module is successfully created, before
+ * creating any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_create);
+
+/*
+ * Occurs after the last reference to a module has been dropped.
+ */
+DEFINE_MODULE_EVENT(gb_module_release);
+
+/*
+ * Occurs after a module is successfully created, before registering
+ * any of its interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_add);
+
+/*
+ * Occurs when a module is deleted, before deregistering its
+ * interfaces.
+ */
+DEFINE_MODULE_EVENT(gb_module_del);
+
+#undef DEFINE_MODULE_EVENT
+
+DECLARE_EVENT_CLASS(gb_host_device,
+
+       TP_PROTO(struct gb_host_device *hd),
+
+       TP_ARGS(hd),
+
+       TP_STRUCT__entry(
+               __field(int, bus_id)
+               __field(size_t, num_cports)
+               __field(size_t, buffer_size_max)
+       ),
+
+       TP_fast_assign(
+               __entry->bus_id = hd->bus_id;
+               __entry->num_cports = hd->num_cports;
+               __entry->buffer_size_max = hd->buffer_size_max;
+       ),
+
+       TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
+               __entry->bus_id, __entry->num_cports,
+               __entry->buffer_size_max)
+);
+
+#define DEFINE_HD_EVENT(name)                                          \
+               DEFINE_EVENT(gb_host_device, name,                      \
+                               TP_PROTO(struct gb_host_device *hd),    \
+                               TP_ARGS(hd))
+
+/*
+ * Occurs after a new host device is successfully created, before
+ * its SVC has been set up.
+ */
+DEFINE_HD_EVENT(gb_hd_create);
+
+/*
+ * Occurs after the last reference to a host device has been
+ * dropped.
+ */
+DEFINE_HD_EVENT(gb_hd_release);
+
+/*
+ * Occurs after a new host device has been added, after the
+ * connection to its SVC has been enabled.
+ */
+DEFINE_HD_EVENT(gb_hd_add);
+
+/*
+ * Occurs when a host device is being disconnected from the AP USB
+ * host controller.
+ */
+DEFINE_HD_EVENT(gb_hd_del);
+
+/*
+ * Occurs when a host device has passed received data to the Greybus
+ * core, after it has been determined it is destined for a valid
+ * CPort.
+ */
+DEFINE_HD_EVENT(gb_hd_in);
+
+#undef DEFINE_HD_EVENT
+
+#endif /* _TRACE_GREYBUS_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+/*
+ * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
+ */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE greybus_trace
+#include <trace/define_trace.h>
+
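[Editor's note] With tracing enabled, all of the events defined above appear under events/greybus/ in tracefs and can be toggled individually or as a group through the usual per-event enable files.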
diff --git a/drivers/greybus/hd.c b/drivers/greybus/hd.c
new file mode 100644 (file)
index 0000000..72b21bf
--- /dev/null
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Host Device
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/greybus.h>
+
+#include "greybus_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
+EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
+
+static struct ida gb_hd_bus_id_map;
+
+int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
+                bool async)
+{
+       if (!hd || !hd->driver || !hd->driver->output)
+               return -EINVAL;
+       return hd->driver->output(hd, req, size, cmd, async);
+}
+EXPORT_SYMBOL_GPL(gb_hd_output);
+
+static ssize_t bus_id_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       struct gb_host_device *hd = to_gb_host_device(dev);
+
+       return sprintf(buf, "%d\n", hd->bus_id);
+}
+static DEVICE_ATTR_RO(bus_id);
+
+static struct attribute *bus_attrs[] = {
+       &dev_attr_bus_id.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(bus);
+
+int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
+{
+       struct ida *id_map = &hd->cport_id_map;
+       int ret;
+
+       ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
+
+void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
+{
+       struct ida *id_map = &hd->cport_id_map;
+
+       ida_simple_remove(id_map, cport_id);
+}
+EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
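
[Editor's note] Reservation exists so that host-device drivers can keep fixed-function CPorts out of the general allocator, e.g. a bridge chip whose CDSI CPorts are hardwired for camera data.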
+
+/* Locking: Caller guarantees serialisation */
+int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
+                        unsigned long flags)
+{
+       struct ida *id_map = &hd->cport_id_map;
+       int ida_start, ida_end;
+
+       if (hd->driver->cport_allocate)
+               return hd->driver->cport_allocate(hd, cport_id, flags);
+
+       if (cport_id < 0) {
+               ida_start = 0;
+               ida_end = hd->num_cports;
+       } else if (cport_id < hd->num_cports) {
+               ida_start = cport_id;
+               ida_end = cport_id + 1;
+       } else {
+               dev_err(&hd->dev, "cport %d not available\n", cport_id);
+               return -EINVAL;
+       }
+
+       return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+}
+
+/* Locking: Caller guarantees serialisation */
+void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
+{
+       if (hd->driver->cport_release) {
+               hd->driver->cport_release(hd, cport_id);
+               return;
+       }
+
+       ida_simple_remove(&hd->cport_id_map, cport_id);
+}
+
+static void gb_hd_release(struct device *dev)
+{
+       struct gb_host_device *hd = to_gb_host_device(dev);
+
+       trace_gb_hd_release(hd);
+
+       if (hd->svc)
+               gb_svc_put(hd->svc);
+       ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
+       ida_destroy(&hd->cport_id_map);
+       kfree(hd);
+}
+
+struct device_type greybus_hd_type = {
+       .name           = "greybus_host_device",
+       .release        = gb_hd_release,
+};
+
+struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
+                                   struct device *parent,
+                                   size_t buffer_size_max,
+                                   size_t num_cports)
+{
+       struct gb_host_device *hd;
+       int ret;
+
+       /*
+        * Validate that the driver implements all of the mandatory
+        * callbacks so that we don't have to check for them on every
+        * call.
+        */
+       if (!driver->message_send || !driver->message_cancel) {
+               dev_err(parent, "mandatory hd-callbacks missing\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
+               dev_err(parent, "greybus host-device buffers too small\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
+               dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /*
+        * Make sure to never allocate messages larger than what the Greybus
+        * protocol supports.
+        */
+       if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
+               dev_warn(parent, "limiting buffer size to %u\n",
+                        GB_OPERATION_MESSAGE_SIZE_MAX);
+               buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
+       }
+
+       hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
+       if (!hd)
+               return ERR_PTR(-ENOMEM);
+
+       ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
+       if (ret < 0) {
+               kfree(hd);
+               return ERR_PTR(ret);
+       }
+       hd->bus_id = ret;
+
+       hd->driver = driver;
+       INIT_LIST_HEAD(&hd->modules);
+       INIT_LIST_HEAD(&hd->connections);
+       ida_init(&hd->cport_id_map);
+       hd->buffer_size_max = buffer_size_max;
+       hd->num_cports = num_cports;
+
+       hd->dev.parent = parent;
+       hd->dev.bus = &greybus_bus_type;
+       hd->dev.type = &greybus_hd_type;
+       hd->dev.groups = bus_groups;
+       hd->dev.dma_mask = hd->dev.parent->dma_mask;
+       device_initialize(&hd->dev);
+       dev_set_name(&hd->dev, "greybus%d", hd->bus_id);
+
+       trace_gb_hd_create(hd);
+
+       hd->svc = gb_svc_create(hd);
+       if (!hd->svc) {
+               dev_err(&hd->dev, "failed to create svc\n");
+               put_device(&hd->dev);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return hd;
+}
+EXPORT_SYMBOL_GPL(gb_hd_create);
+
+int gb_hd_add(struct gb_host_device *hd)
+{
+       int ret;
+
+       ret = device_add(&hd->dev);
+       if (ret)
+               return ret;
+
+       ret = gb_svc_add(hd->svc);
+       if (ret) {
+               device_del(&hd->dev);
+               return ret;
+       }
+
+       trace_gb_hd_add(hd);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_hd_add);
+
+void gb_hd_del(struct gb_host_device *hd)
+{
+       trace_gb_hd_del(hd);
+
+       /*
+        * Tear down the svc and flush any ongoing hotplug processing before
+        * removing the remaining interfaces.
+        */
+       gb_svc_del(hd->svc);
+
+       device_del(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_del);
+
+void gb_hd_shutdown(struct gb_host_device *hd)
+{
+       gb_svc_del(hd->svc);
+}
+EXPORT_SYMBOL_GPL(gb_hd_shutdown);
+
+void gb_hd_put(struct gb_host_device *hd)
+{
+       put_device(&hd->dev);
+}
+EXPORT_SYMBOL_GPL(gb_hd_put);
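
[Editor's note] From a host-device (bridge) driver's point of view, the lifecycle built from these exports is create, add, and later del plus put. A compressed sketch follows; the example_* names and the size/count arguments are hypothetical, and message_send/message_cancel are the two mandatory callbacks gb_hd_create() checks for:

static struct gb_hd_driver example_hd_driver = {
	.message_send	= example_message_send,		/* defined elsewhere */
	.message_cancel	= example_message_cancel,	/* defined elsewhere */
};

static int example_attach(struct device *parent)
{
	struct gb_host_device *hd;
	int ret;

	/* 2048-byte max buffers, 16 CPorts -- illustrative values. */
	hd = gb_hd_create(&example_hd_driver, parent, 2048, 16);
	if (IS_ERR(hd))
		return PTR_ERR(hd);

	ret = gb_hd_add(hd);
	if (ret) {
		gb_hd_put(hd);
		return ret;
	}

	return 0;
}

/* Teardown mirrors this: gb_hd_del(hd) followed by gb_hd_put(hd). */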
+
+int __init gb_hd_init(void)
+{
+       ida_init(&gb_hd_bus_id_map);
+
+       return 0;
+}
+
+void gb_hd_exit(void)
+{
+       ida_destroy(&gb_hd_bus_id_map);
+}
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
new file mode 100644 (file)
index 0000000..67dbe6f
--- /dev/null
@@ -0,0 +1,1263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus interface code
+ *
+ * Copyright 2014 Google Inc.
+ * Copyright 2014 Linaro Ltd.
+ */
+
+#include <linux/delay.h>
+#include <linux/greybus.h>
+
+#include "greybus_trace.h"
+
+#define GB_INTERFACE_MODE_SWITCH_TIMEOUT       2000
+
+#define GB_INTERFACE_DEVICE_ID_BAD     0xff
+
+#define GB_INTERFACE_AUTOSUSPEND_MS                    3000
+
+/* Time required for interface to enter standby before disabling REFCLK */
+#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS                        20
+
+/* Don't-care selector index */
+#define DME_SELECTOR_INDEX_NULL                0
+
+/* DME attributes */
+/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
+#define DME_T_TST_SRC_INCREMENT                0x4083
+
+#define DME_DDBL1_MANUFACTURERID       0x5003
+#define DME_DDBL1_PRODUCTID            0x5004
+
+#define DME_TOSHIBA_GMP_VID            0x6000
+#define DME_TOSHIBA_GMP_PID            0x6001
+#define DME_TOSHIBA_GMP_SN0            0x6002
+#define DME_TOSHIBA_GMP_SN1            0x6003
+#define DME_TOSHIBA_GMP_INIT_STATUS    0x6101
+
+/* DDBL1 Manufacturer and Product ids */
+#define TOSHIBA_DMID                   0x0126
+#define TOSHIBA_ES2_BRIDGE_DPID                0x1000
+#define TOSHIBA_ES3_APBRIDGE_DPID      0x1001
+#define TOSHIBA_ES3_GBPHY_DPID         0x1002
+
+static int gb_interface_hibernate_link(struct gb_interface *intf);
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
+
+static int gb_interface_dme_attr_get(struct gb_interface *intf,
+                                    u16 attr, u32 *val)
+{
+       return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
+                                       attr, DME_SELECTOR_INDEX_NULL, val);
+}
+
+static int gb_interface_read_ara_dme(struct gb_interface *intf)
+{
+       u32 sn0, sn1;
+       int ret;
+
+       /*
+        * Unless this is a Toshiba bridge, bail out until we have defined
+        * standard GMP attributes.
+        */
+       if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
+               dev_err(&intf->dev, "unknown manufacturer %08x\n",
+                       intf->ddbl1_manufacturer_id);
+               return -ENODEV;
+       }
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
+                                       &intf->vendor_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
+                                       &intf->product_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
+       if (ret)
+               return ret;
+
+       intf->serial_number = (u64)sn1 << 32 | sn0;
+
+       return 0;
+}
+
+static int gb_interface_read_dme(struct gb_interface *intf)
+{
+       int ret;
+
+       /* DME attributes have already been read */
+       if (intf->dme_read)
+               return 0;
+
+       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
+                                       &intf->ddbl1_manufacturer_id);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
+                                       &intf->ddbl1_product_id);
+       if (ret)
+               return ret;
+
+       if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
+           intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
+               intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
+               intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
+       }
+
+       ret = gb_interface_read_ara_dme(intf);
+       if (ret)
+               return ret;
+
+       intf->dme_read = true;
+
+       return 0;
+}
+
+static int gb_interface_route_create(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       u8 intf_id = intf->interface_id;
+       u8 device_id;
+       int ret;
+
+       /* Allocate an interface device id. */
+       ret = ida_simple_get(&svc->device_id_map,
+                            GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
+                            GFP_KERNEL);
+       if (ret < 0) {
+               dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
+               return ret;
+       }
+       device_id = ret;
+
+       ret = gb_svc_intf_device_id(svc, intf_id, device_id);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set device id %u: %d\n",
+                       device_id, ret);
+               goto err_ida_remove;
+       }
+
+       /* FIXME: Hard-coded AP device id. */
+       ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
+                                 intf_id, device_id);
+       if (ret) {
+               dev_err(&intf->dev, "failed to create route: %d\n", ret);
+               goto err_svc_id_free;
+       }
+
+       intf->device_id = device_id;
+
+       return 0;
+
+err_svc_id_free:
+       /*
+        * XXX Should we tell SVC that this id doesn't belong to interface
+        * XXX anymore.
+        */
+err_ida_remove:
+       ida_simple_remove(&svc->device_id_map, device_id);
+
+       return ret;
+}
+
+static void gb_interface_route_destroy(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+
+       if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
+               return;
+
+       gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
+       ida_simple_remove(&svc->device_id_map, intf->device_id);
+       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+}
+
+/* Locking: Caller holds the interface mutex. */
+static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
+{
+       int ret;
+
+       dev_info(&intf->dev, "legacy mode switch detected\n");
+
+       /* Mark as disconnected to prevent I/O during disable. */
+       intf->disconnected = true;
+       gb_interface_disable(intf);
+       intf->disconnected = false;
+
+       ret = gb_interface_enable(intf);
+       if (ret) {
+               dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
+               gb_interface_deactivate(intf);
+       }
+
+       return ret;
+}
+
+void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
+                               u32 mailbox)
+{
+       mutex_lock(&intf->mutex);
+
+       if (result) {
+               dev_warn(&intf->dev,
+                        "mailbox event with UniPro error: 0x%04x\n",
+                        result);
+               goto err_disable;
+       }
+
+       if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
+               dev_warn(&intf->dev,
+                        "mailbox event with unexpected value: 0x%08x\n",
+                        mailbox);
+               goto err_disable;
+       }
+
+       if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
+               gb_interface_legacy_mode_switch(intf);
+               goto out_unlock;
+       }
+
+       if (!intf->mode_switch) {
+               dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
+                        mailbox);
+               goto err_disable;
+       }
+
+       dev_info(&intf->dev, "mode switch detected\n");
+
+       complete(&intf->mode_switch_completion);
+
+out_unlock:
+       mutex_unlock(&intf->mutex);
+
+       return;
+
+err_disable:
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_interface_mode_switch_work(struct work_struct *work)
+{
+       struct gb_interface *intf;
+       struct gb_control *control;
+       unsigned long timeout;
+       int ret;
+
+       intf = container_of(work, struct gb_interface, mode_switch_work);
+
+       mutex_lock(&intf->mutex);
+       /* Make sure interface is still enabled. */
+       if (!intf->enabled) {
+               dev_dbg(&intf->dev, "mode switch aborted\n");
+               intf->mode_switch = false;
+               mutex_unlock(&intf->mutex);
+               goto out_interface_put;
+       }
+
+       /*
+        * Prepare the control device for mode switch and make sure to get an
+        * extra reference before it goes away during interface disable.
+        */
+       control = gb_control_get(intf->control);
+       gb_control_mode_switch_prepare(control);
+       gb_interface_disable(intf);
+       mutex_unlock(&intf->mutex);
+
+       timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
+       ret = wait_for_completion_interruptible_timeout(
+                       &intf->mode_switch_completion, timeout);
+
+       /* Finalise control-connection mode switch. */
+       gb_control_mode_switch_complete(control);
+       gb_control_put(control);
+
+       if (ret < 0) {
+               dev_err(&intf->dev, "mode switch interrupted\n");
+               goto err_deactivate;
+       } else if (ret == 0) {
+               dev_err(&intf->dev, "mode switch timed out\n");
+               goto err_deactivate;
+       }
+
+       /* Re-enable (re-enumerate) interface if still active. */
+       mutex_lock(&intf->mutex);
+       intf->mode_switch = false;
+       if (intf->active) {
+               ret = gb_interface_enable(intf);
+               if (ret) {
+                       dev_err(&intf->dev, "failed to re-enable interface: %d\n",
+                               ret);
+                       gb_interface_deactivate(intf);
+               }
+       }
+       mutex_unlock(&intf->mutex);
+
+out_interface_put:
+       gb_interface_put(intf);
+
+       return;
+
+err_deactivate:
+       mutex_lock(&intf->mutex);
+       intf->mode_switch = false;
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+
+       gb_interface_put(intf);
+}
+
+int gb_interface_request_mode_switch(struct gb_interface *intf)
+{
+       int ret = 0;
+
+       mutex_lock(&intf->mutex);
+       if (intf->mode_switch) {
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
+       intf->mode_switch = true;
+       reinit_completion(&intf->mode_switch_completion);
+
+       /*
+        * Get a reference to the interface device, which will be put once the
+        * mode switch is complete.
+        */
+       get_device(&intf->dev);
+
+       if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
+               put_device(&intf->dev);
+               ret = -EBUSY;
+               goto out_unlock;
+       }
+
+out_unlock:
+       mutex_unlock(&intf->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
+
+/*
+ * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
+ * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to
+ * clear it after reading a non-zero value from it.
+ *
+ * FIXME: This is module-hardware dependent and needs to be extended for every
+ * type of module we want to support.
+ */
+static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
+{
+       struct gb_host_device *hd = intf->hd;
+       unsigned long bootrom_quirks;
+       unsigned long s2l_quirks;
+       int ret;
+       u32 value;
+       u16 attr;
+       u8 init_status;
+
+       /*
+        * ES2 bridges use T_TstSrcIncrement for the init status.
+        *
+        * FIXME: Remove ES2 support
+        */
+       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+               attr = DME_T_TST_SRC_INCREMENT;
+       else
+               attr = DME_TOSHIBA_GMP_INIT_STATUS;
+
+       ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
+                                 DME_SELECTOR_INDEX_NULL, &value);
+       if (ret)
+               return ret;
+
+       /*
+        * A nonzero init status indicates the module has finished
+        * initializing.
+        */
+       if (!value) {
+               dev_err(&intf->dev, "invalid init status\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Extract the init status.
+        *
+        * For ES2: check the lowest 8 bits of 'value'.
+        * For ES3: check the highest 8 bits of the 32-bit 'value'.
+        *
+        * FIXME: Remove ES2 support
+        */
+       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
+               init_status = value & 0xff;
+       else
+               init_status = value >> 24;
+
+       /*
+        * Check if the interface is executing the quirky ES3 bootrom that,
+        * for example, requires E2EFC, CSD and CSV to be disabled.
+        */
+       bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
+                               GB_INTERFACE_QUIRK_FORCED_DISABLE |
+                               GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
+                               GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
+
+       s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
+
+       switch (init_status) {
+       case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
+       case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
+               intf->quirks |= bootrom_quirks;
+               break;
+       case GB_INIT_S2_LOADER_BOOT_STARTED:
+               /* S2 Loader doesn't support runtime PM */
+               intf->quirks &= ~bootrom_quirks;
+               intf->quirks |= s2l_quirks;
+               break;
+       default:
+               intf->quirks &= ~bootrom_quirks;
+               intf->quirks &= ~s2l_quirks;
+       }
+
+       /* Clear the init status. */
+       return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
+                                  DME_SELECTOR_INDEX_NULL, 0);
+}
+
+/* interface sysfs attributes */
+#define gb_interface_attr(field, type)                                 \
+static ssize_t field##_show(struct device *dev,                                \
+                           struct device_attribute *attr,              \
+                           char *buf)                                  \
+{                                                                      \
+       struct gb_interface *intf = to_gb_interface(dev);               \
+       return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);        \
+}                                                                      \
+static DEVICE_ATTR_RO(field)
+
+gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
+gb_interface_attr(ddbl1_product_id, "0x%08x");
+gb_interface_attr(interface_id, "%u");
+gb_interface_attr(vendor_id, "0x%08x");
+gb_interface_attr(product_id, "0x%08x");
+gb_interface_attr(serial_number, "0x%016llx");
+
+static ssize_t voltage_now_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_VOL,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(voltage_now);
+
+static ssize_t current_now_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_CURR,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(current_now);
+
+static ssize_t power_now_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+       u32 measurement;
+
+       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
+                                           GB_SVC_PWRMON_TYPE_PWR,
+                                           &measurement);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
+               return ret;
+       }
+
+       return sprintf(buf, "%u\n", measurement);
+}
+static DEVICE_ATTR_RO(power_now);
+
+static ssize_t power_state_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       if (intf->active)
+               return scnprintf(buf, PAGE_SIZE, "on\n");
+       else
+               return scnprintf(buf, PAGE_SIZE, "off\n");
+}
+
+static ssize_t power_state_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t len)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       bool activate;
+       int ret = 0;
+
+       if (kstrtobool(buf, &activate))
+               return -EINVAL;
+
+       mutex_lock(&intf->mutex);
+
+       if (activate == intf->active)
+               goto unlock;
+
+       if (activate) {
+               ret = gb_interface_activate(intf);
+               if (ret) {
+                       dev_err(&intf->dev,
+                               "failed to activate interface: %d\n", ret);
+                       goto unlock;
+               }
+
+               ret = gb_interface_enable(intf);
+               if (ret) {
+                       dev_err(&intf->dev,
+                               "failed to enable interface: %d\n", ret);
+                       gb_interface_deactivate(intf);
+                       goto unlock;
+               }
+       } else {
+               gb_interface_disable(intf);
+               gb_interface_deactivate(intf);
+       }
+
+unlock:
+       mutex_unlock(&intf->mutex);
+
+       if (ret)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_RW(power_state);
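
[Editor's note] From user space this means an interface can be powered off and back on by writing "off" or "on" to its power_state attribute under /sys/bus/greybus/devices/ (kstrtobool() also accepts the usual 0/1 and y/n spellings).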
+
+static const char *gb_interface_type_string(struct gb_interface *intf)
+{
+       static const char * const types[] = {
+               [GB_INTERFACE_TYPE_INVALID] = "invalid",
+               [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
+               [GB_INTERFACE_TYPE_DUMMY] = "dummy",
+               [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
+               [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
+       };
+
+       return types[intf->type];
+}
+
+static ssize_t interface_type_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       return sprintf(buf, "%s\n", gb_interface_type_string(intf));
+}
+static DEVICE_ATTR_RO(interface_type);
+
+static struct attribute *interface_unipro_attrs[] = {
+       &dev_attr_ddbl1_manufacturer_id.attr,
+       &dev_attr_ddbl1_product_id.attr,
+       NULL
+};
+
+static struct attribute *interface_greybus_attrs[] = {
+       &dev_attr_vendor_id.attr,
+       &dev_attr_product_id.attr,
+       &dev_attr_serial_number.attr,
+       NULL
+};
+
+static struct attribute *interface_power_attrs[] = {
+       &dev_attr_voltage_now.attr,
+       &dev_attr_current_now.attr,
+       &dev_attr_power_now.attr,
+       &dev_attr_power_state.attr,
+       NULL
+};
+
+static struct attribute *interface_common_attrs[] = {
+       &dev_attr_interface_id.attr,
+       &dev_attr_interface_type.attr,
+       NULL
+};
+
+static umode_t interface_unipro_is_visible(struct kobject *kobj,
+                                          struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_UNIPRO:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static umode_t interface_greybus_is_visible(struct kobject *kobj,
+                                           struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static umode_t interface_power_is_visible(struct kobject *kobj,
+                                         struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_UNIPRO:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               return attr->mode;
+       default:
+               return 0;
+       }
+}
+
+static const struct attribute_group interface_unipro_group = {
+       .is_visible     = interface_unipro_is_visible,
+       .attrs          = interface_unipro_attrs,
+};
+
+static const struct attribute_group interface_greybus_group = {
+       .is_visible     = interface_greybus_is_visible,
+       .attrs          = interface_greybus_attrs,
+};
+
+static const struct attribute_group interface_power_group = {
+       .is_visible     = interface_power_is_visible,
+       .attrs          = interface_power_attrs,
+};
+
+static const struct attribute_group interface_common_group = {
+       .attrs          = interface_common_attrs,
+};
+
+static const struct attribute_group *interface_groups[] = {
+       &interface_unipro_group,
+       &interface_greybus_group,
+       &interface_power_group,
+       &interface_common_group,
+       NULL
+};
+
+static void gb_interface_release(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+
+       trace_gb_interface_release(intf);
+
+       kfree(intf);
+}
+
+#ifdef CONFIG_PM
+static int gb_interface_suspend(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       int ret;
+
+       ret = gb_control_interface_suspend_prepare(intf->control);
+       if (ret)
+               return ret;
+
+       ret = gb_control_suspend(intf->control);
+       if (ret)
+               goto err_hibernate_abort;
+
+       ret = gb_interface_hibernate_link(intf);
+       if (ret)
+               return ret;
+
+       /* Delay to allow interface to enter standby before disabling refclk */
+       msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
+
+       ret = gb_interface_refclk_set(intf, false);
+       if (ret)
+               return ret;
+
+       return 0;
+
+err_hibernate_abort:
+       gb_control_interface_hibernate_abort(intf->control);
+
+       return ret;
+}
+
+static int gb_interface_resume(struct device *dev)
+{
+       struct gb_interface *intf = to_gb_interface(dev);
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       ret = gb_interface_refclk_set(intf, true);
+       if (ret)
+               return ret;
+
+       ret = gb_svc_intf_resume(svc, intf->interface_id);
+       if (ret)
+               return ret;
+
+       ret = gb_control_resume(intf->control);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int gb_interface_runtime_idle(struct device *dev)
+{
+       pm_runtime_mark_last_busy(dev);
+       pm_request_autosuspend(dev);
+
+       return 0;
+}
+#endif
+
+static const struct dev_pm_ops gb_interface_pm_ops = {
+       SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
+                          gb_interface_runtime_idle)
+};
+
+struct device_type greybus_interface_type = {
+       .name =         "greybus_interface",
+       .release =      gb_interface_release,
+       .pm =           &gb_interface_pm_ops,
+};
+
+/*
+ * A Greybus module represents a user-replaceable component on a GMP
+ * phone.  An interface is the physical connection on that module.  A
+ * module may have more than one interface.
+ *
+ * Create a gb_interface structure to represent a discovered interface.
+ * The position of the interface within the Endo is encoded in the
+ * "interface_id" argument.
+ *
+ * Returns a pointer to the new interface or a null pointer if a
+ * failure occurs due to memory exhaustion.
+ */
+struct gb_interface *gb_interface_create(struct gb_module *module,
+                                        u8 interface_id)
+{
+       struct gb_host_device *hd = module->hd;
+       struct gb_interface *intf;
+
+       intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+       if (!intf)
+               return NULL;
+
+       intf->hd = hd;          /* XXX refcount? */
+       intf->module = module;
+       intf->interface_id = interface_id;
+       INIT_LIST_HEAD(&intf->bundles);
+       INIT_LIST_HEAD(&intf->manifest_descs);
+       mutex_init(&intf->mutex);
+       INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
+       init_completion(&intf->mode_switch_completion);
+
+       /* Invalid device id to start with */
+       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
+
+       intf->dev.parent = &module->dev;
+       intf->dev.bus = &greybus_bus_type;
+       intf->dev.type = &greybus_interface_type;
+       intf->dev.groups = interface_groups;
+       intf->dev.dma_mask = module->dev.dma_mask;
+       device_initialize(&intf->dev);
+       dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
+                    interface_id);
+
+       pm_runtime_set_autosuspend_delay(&intf->dev,
+                                        GB_INTERFACE_AUTOSUSPEND_MS);
+
+       trace_gb_interface_create(intf);
+
+       return intf;
+}
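+
+/*
+ * Illustrative sketch (editor's addition, not compiled): pairing
+ * gb_interface_create() with gb_interface_add() and gb_interface_put().
+ * This covers registration only; gb_interface_activate() and
+ * gb_interface_enable() are separate steps taken under the interface
+ * mutex.  gb_module_create()/gb_module_add() below do essentially this
+ * for every interface of a module.
+ */
+#if 0
+static int example_bring_up_interface(struct gb_module *module, u8 id)
+{
+       struct gb_interface *intf;
+       int ret;
+
+       intf = gb_interface_create(module, id);
+       if (!intf)
+               return -ENOMEM;
+
+       ret = gb_interface_add(intf);           /* device_add() */
+       if (ret)
+               gb_interface_put(intf);         /* drop the sole reference */
+
+       return ret;
+}
+#endif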
+
+static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
+
+       ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
+       if (ret) {
+               dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int gb_interface_activate_operation(struct gb_interface *intf,
+                                          enum gb_interface_type *intf_type)
+{
+       struct gb_svc *svc = intf->hd->svc;
+       u8 type;
+       int ret;
+
+       dev_dbg(&intf->dev, "%s\n", __func__);
+
+       ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
+       if (ret) {
+               dev_err(&intf->dev, "failed to activate: %d\n", ret);
+               return ret;
+       }
+
+       switch (type) {
+       case GB_SVC_INTF_TYPE_DUMMY:
+               *intf_type = GB_INTERFACE_TYPE_DUMMY;
+               /* FIXME: handle as an error for now */
+               return -ENODEV;
+       case GB_SVC_INTF_TYPE_UNIPRO:
+               *intf_type = GB_INTERFACE_TYPE_UNIPRO;
+               dev_err(&intf->dev, "interface type UniPro not supported\n");
+               /* FIXME: handle as an error for now */
+               return -ENODEV;
+       case GB_SVC_INTF_TYPE_GREYBUS:
+               *intf_type = GB_INTERFACE_TYPE_GREYBUS;
+               break;
+       default:
+               dev_err(&intf->dev, "unknown interface type: %u\n", type);
+               *intf_type = GB_INTERFACE_TYPE_UNKNOWN;
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int gb_interface_hibernate_link(struct gb_interface *intf)
+{
+       struct gb_svc *svc = intf->hd->svc;
+
+       return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
+}
+
+static int _gb_interface_activate(struct gb_interface *intf,
+                                 enum gb_interface_type *type)
+{
+       int ret;
+
+       *type = GB_INTERFACE_TYPE_UNKNOWN;
+
+       if (intf->ejected || intf->removed)
+               return -ENODEV;
+
+       ret = gb_interface_vsys_set(intf, true);
+       if (ret)
+               return ret;
+
+       ret = gb_interface_refclk_set(intf, true);
+       if (ret)
+               goto err_vsys_disable;
+
+       ret = gb_interface_unipro_set(intf, true);
+       if (ret)
+               goto err_refclk_disable;
+
+       ret = gb_interface_activate_operation(intf, type);
+       if (ret) {
+               switch (*type) {
+               case GB_INTERFACE_TYPE_UNIPRO:
+               case GB_INTERFACE_TYPE_GREYBUS:
+                       goto err_hibernate_link;
+               default:
+                       goto err_unipro_disable;
+               }
+       }
+
+       ret = gb_interface_read_dme(intf);
+       if (ret)
+               goto err_hibernate_link;
+
+       ret = gb_interface_route_create(intf);
+       if (ret)
+               goto err_hibernate_link;
+
+       intf->active = true;
+
+       trace_gb_interface_activate(intf);
+
+       return 0;
+
+err_hibernate_link:
+       gb_interface_hibernate_link(intf);
+err_unipro_disable:
+       gb_interface_unipro_set(intf, false);
+err_refclk_disable:
+       gb_interface_refclk_set(intf, false);
+err_vsys_disable:
+       gb_interface_vsys_set(intf, false);
+
+       return ret;
+}
+
+/*
+ * At present, we assume a UniPro-only module to be a Greybus module that
+ * failed to send its mailbox poke. There is some reason to believe that this
+ * is because of a bug in the ES3 bootrom.
+ *
+ * FIXME: Check if this is a Toshiba bridge before retrying?
+ */
+static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
+                                          enum gb_interface_type *type)
+{
+       int retries = 3;
+       int ret;
+
+       while (retries--) {
+               ret = _gb_interface_activate(intf, type);
+               if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
+                       continue;
+
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Activate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_activate(struct gb_interface *intf)
+{
+       enum gb_interface_type type;
+       int ret;
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_INVALID:
+       case GB_INTERFACE_TYPE_GREYBUS:
+               ret = _gb_interface_activate_es3_hack(intf, &type);
+               break;
+       default:
+               ret = _gb_interface_activate(intf, &type);
+       }
+
+       /* Make sure type is detected correctly during reactivation. */
+       if (intf->type != GB_INTERFACE_TYPE_INVALID) {
+               if (type != intf->type) {
+                       dev_err(&intf->dev, "failed to detect interface type\n");
+
+                       if (!ret)
+                               gb_interface_deactivate(intf);
+
+                       return -EIO;
+               }
+       } else {
+               intf->type = type;
+       }
+
+       return ret;
+}
+
+/*
+ * Deactivate an interface.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_deactivate(struct gb_interface *intf)
+{
+       if (!intf->active)
+               return;
+
+       trace_gb_interface_deactivate(intf);
+
+       /* Abort any ongoing mode switch. */
+       if (intf->mode_switch)
+               complete(&intf->mode_switch_completion);
+
+       gb_interface_route_destroy(intf);
+       gb_interface_hibernate_link(intf);
+       gb_interface_unipro_set(intf, false);
+       gb_interface_refclk_set(intf, false);
+       gb_interface_vsys_set(intf, false);
+
+       intf->active = false;
+}
+
+/*
+ * Enable an interface by enabling its control connection, fetching the
+ * manifest and other information over it, and finally registering its child
+ * devices.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+int gb_interface_enable(struct gb_interface *intf)
+{
+       struct gb_control *control;
+       struct gb_bundle *bundle, *tmp;
+       int ret, size;
+       void *manifest;
+
+       ret = gb_interface_read_and_clear_init_status(intf);
+       if (ret) {
+               dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
+               return ret;
+       }
+
+       /* Establish control connection */
+       control = gb_control_create(intf);
+       if (IS_ERR(control)) {
+               dev_err(&intf->dev, "failed to create control device: %ld\n",
+                       PTR_ERR(control));
+               return PTR_ERR(control);
+       }
+       intf->control = control;
+
+       ret = gb_control_enable(intf->control);
+       if (ret)
+               goto err_put_control;
+
+       /* Get manifest size using control protocol on CPort */
+       size = gb_control_get_manifest_size_operation(intf);
+       if (size <= 0) {
+               dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
+
+               if (size)
+                       ret = size;
+               else
+                       ret = -EINVAL;
+
+               goto err_disable_control;
+       }
+
+       manifest = kmalloc(size, GFP_KERNEL);
+       if (!manifest) {
+               ret = -ENOMEM;
+               goto err_disable_control;
+       }
+
+       /* Get manifest using control protocol on CPort */
+       ret = gb_control_get_manifest_operation(intf, manifest, size);
+       if (ret) {
+               dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
+               goto err_free_manifest;
+       }
+
+       /*
+        * Parse the manifest and build up our data structures representing
+        * what's in it.
+        */
+       if (!gb_manifest_parse(intf, manifest, size)) {
+               dev_err(&intf->dev, "failed to parse manifest\n");
+               ret = -EINVAL;
+               goto err_destroy_bundles;
+       }
+
+       ret = gb_control_get_bundle_versions(intf->control);
+       if (ret)
+               goto err_destroy_bundles;
+
+       /* Register the control device and any bundles */
+       ret = gb_control_add(intf->control);
+       if (ret)
+               goto err_destroy_bundles;
+
+       pm_runtime_use_autosuspend(&intf->dev);
+       pm_runtime_get_noresume(&intf->dev);
+       pm_runtime_set_active(&intf->dev);
+       pm_runtime_enable(&intf->dev);
+
+       list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
+               ret = gb_bundle_add(bundle);
+               if (ret) {
+                       gb_bundle_destroy(bundle);
+                       continue;
+               }
+       }
+
+       kfree(manifest);
+
+       intf->enabled = true;
+
+       pm_runtime_put(&intf->dev);
+
+       trace_gb_interface_enable(intf);
+
+       return 0;
+
+err_destroy_bundles:
+       list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
+               gb_bundle_destroy(bundle);
+err_free_manifest:
+       kfree(manifest);
+err_disable_control:
+       gb_control_disable(intf->control);
+err_put_control:
+       gb_control_put(intf->control);
+       intf->control = NULL;
+
+       return ret;
+}
+
+/*
+ * Disable an interface and destroy its bundles.
+ *
+ * Locking: Caller holds the interface mutex.
+ */
+void gb_interface_disable(struct gb_interface *intf)
+{
+       struct gb_bundle *bundle;
+       struct gb_bundle *next;
+
+       if (!intf->enabled)
+               return;
+
+       trace_gb_interface_disable(intf);
+
+       pm_runtime_get_sync(&intf->dev);
+
+       /* Set disconnected flag to avoid I/O during connection tear down. */
+       if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
+               intf->disconnected = true;
+
+       list_for_each_entry_safe(bundle, next, &intf->bundles, links)
+               gb_bundle_destroy(bundle);
+
+       if (!intf->mode_switch && !intf->disconnected)
+               gb_control_interface_deactivate_prepare(intf->control);
+
+       gb_control_del(intf->control);
+       gb_control_disable(intf->control);
+       gb_control_put(intf->control);
+       intf->control = NULL;
+
+       intf->enabled = false;
+
+       pm_runtime_disable(&intf->dev);
+       pm_runtime_set_suspended(&intf->dev);
+       pm_runtime_dont_use_autosuspend(&intf->dev);
+       pm_runtime_put_noidle(&intf->dev);
+}
+
+/* Register an interface. */
+int gb_interface_add(struct gb_interface *intf)
+{
+       int ret;
+
+       ret = device_add(&intf->dev);
+       if (ret) {
+               dev_err(&intf->dev, "failed to register interface: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_interface_add(intf);
+
+       dev_info(&intf->dev, "Interface added (%s)\n",
+                gb_interface_type_string(intf));
+
+       switch (intf->type) {
+       case GB_INTERFACE_TYPE_GREYBUS:
+               dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
+                        intf->vendor_id, intf->product_id);
+               /* fall-through */
+       case GB_INTERFACE_TYPE_UNIPRO:
+               dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
+                        intf->ddbl1_manufacturer_id,
+                        intf->ddbl1_product_id);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Deregister an interface. */
+void gb_interface_del(struct gb_interface *intf)
+{
+       if (device_is_registered(&intf->dev)) {
+               trace_gb_interface_del(intf);
+
+               device_del(&intf->dev);
+               dev_info(&intf->dev, "Interface removed\n");
+       }
+}
+
+void gb_interface_put(struct gb_interface *intf)
+{
+       put_device(&intf->dev);
+}
diff --git a/drivers/greybus/manifest.c b/drivers/greybus/manifest.c
new file mode 100644 (file)
index 0000000..dd70406
--- /dev/null
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus manifest parsing
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/greybus.h>
+
+static const char *get_descriptor_type_string(u8 type)
+{
+       switch (type) {
+       case GREYBUS_TYPE_INVALID:
+               return "invalid";
+       case GREYBUS_TYPE_STRING:
+               return "string";
+       case GREYBUS_TYPE_INTERFACE:
+               return "interface";
+       case GREYBUS_TYPE_CPORT:
+               return "cport";
+       case GREYBUS_TYPE_BUNDLE:
+               return "bundle";
+       default:
+               WARN_ON(1);
+               return "unknown";
+       }
+}
+
+/*
+ * We scan the manifest once to identify where all the descriptors
+ * are.  The result is a list of these manifest_desc structures.  We
+ * then pick through them for what we're looking for (starting with
+ * the interface descriptor).  As each is processed we remove it from
+ * the list.  When we're done the list should (probably) be empty.
+ */
+struct manifest_desc {
+       struct list_head                links;
+
+       size_t                          size;
+       void                            *data;
+       enum greybus_descriptor_type    type;
+};
+
+static void release_manifest_descriptor(struct manifest_desc *descriptor)
+{
+       list_del(&descriptor->links);
+       kfree(descriptor);
+}
+
+static void release_manifest_descriptors(struct gb_interface *intf)
+{
+       struct manifest_desc *descriptor;
+       struct manifest_desc *next;
+
+       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+               release_manifest_descriptor(descriptor);
+}
+
+static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
+{
+       struct manifest_desc *desc, *tmp;
+       struct greybus_descriptor_cport *desc_cport;
+
+       list_for_each_entry_safe(desc, tmp, head, links) {
+               desc_cport = desc->data;
+
+               if (desc->type != GREYBUS_TYPE_CPORT)
+                       continue;
+
+               if (desc_cport->bundle == bundle_id)
+                       release_manifest_descriptor(desc);
+       }
+}
+
+static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
+{
+       struct manifest_desc *descriptor;
+       struct manifest_desc *next;
+
+       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
+               if (descriptor->type == GREYBUS_TYPE_BUNDLE)
+                       return descriptor;
+
+       return NULL;
+}
+
+/*
+ * Validate the given descriptor.  Its reported size must fit within
+ * the number of bytes remaining, and it must have a recognized
+ * type.  Check that the reported size is at least as big as what
+ * we expect to see.  (It could be bigger, perhaps for a new version
+ * of the format.)
+ *
+ * Returns the (non-zero) number of bytes consumed by the descriptor,
+ * or a negative errno.
+ */
+static int identify_descriptor(struct gb_interface *intf,
+                              struct greybus_descriptor *desc, size_t size)
+{
+       struct greybus_descriptor_header *desc_header = &desc->header;
+       struct manifest_desc *descriptor;
+       size_t desc_size;
+       size_t expected_size;
+
+       if (size < sizeof(*desc_header)) {
+               dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
+                       sizeof(*desc_header));
+               return -EINVAL;         /* Must at least have header */
+       }
+
+       desc_size = le16_to_cpu(desc_header->size);
+       if (desc_size > size) {
+               dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
+                       desc_size, size);
+               return -EINVAL;
+       }
+
+       /* Descriptor needs to at least have a header */
+       expected_size = sizeof(*desc_header);
+
+       switch (desc_header->type) {
+       case GREYBUS_TYPE_STRING:
+               expected_size += sizeof(struct greybus_descriptor_string);
+               expected_size += desc->string.length;
+
+               /* String descriptors are padded to 4 byte boundaries */
+               expected_size = ALIGN(expected_size, 4);
+               break;
+       case GREYBUS_TYPE_INTERFACE:
+               expected_size += sizeof(struct greybus_descriptor_interface);
+               break;
+       case GREYBUS_TYPE_BUNDLE:
+               expected_size += sizeof(struct greybus_descriptor_bundle);
+               break;
+       case GREYBUS_TYPE_CPORT:
+               expected_size += sizeof(struct greybus_descriptor_cport);
+               break;
+       case GREYBUS_TYPE_INVALID:
+       default:
+               dev_err(&intf->dev, "invalid descriptor type (%u)\n",
+                       desc_header->type);
+               return -EINVAL;
+       }
+
+       if (desc_size < expected_size) {
+               dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
+                       get_descriptor_type_string(desc_header->type),
+                       desc_size, expected_size);
+               return -EINVAL;
+       }
+
+       /* Descriptor bigger than what we expect */
+       if (desc_size > expected_size) {
+               dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
+                        get_descriptor_type_string(desc_header->type),
+                        expected_size, desc_size);
+       }
+
+       descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
+       if (!descriptor)
+               return -ENOMEM;
+
+       descriptor->size = desc_size;
+       descriptor->data = (char *)desc + sizeof(*desc_header);
+       descriptor->type = desc_header->type;
+       list_add_tail(&descriptor->links, &intf->manifest_descs);
+
+       /* desc_size is positive and is known to fit in a signed int */
+
+       return desc_size;
+}
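+
+/*
+ * Worked example (editor's note, values hypothetical): with the
+ * greybus_manifest.h layouts, a string descriptor carrying the 5-byte
+ * string "MODEL" must report a size of at least
+ *
+ *   ALIGN(sizeof(header) + sizeof(string descriptor) + 5, 4)
+ *
+ * bytes, since string descriptors are padded to 4-byte boundaries.  A
+ * larger reported size is tolerated with a warning; a smaller one is
+ * rejected with -EINVAL.
+ */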
+
+/*
+ * Find the string descriptor having the given id, validate it, and
+ * allocate a duplicate copy of it.  The duplicate has an extra byte
+ * which guarantees the returned string is NUL-terminated.
+ *
+ * String index 0 is valid (it represents "no string"), and for
+ * that a null pointer is returned.
+ *
+ * Otherwise returns a pointer to a newly-allocated copy of the
+ * descriptor string, or an error-coded pointer on failure.
+ */
+static char *gb_string_get(struct gb_interface *intf, u8 string_id)
+{
+       struct greybus_descriptor_string *desc_string;
+       struct manifest_desc *descriptor;
+       bool found = false;
+       char *string;
+
+       /* A zero string id means no string (but no error) */
+       if (!string_id)
+               return NULL;
+
+       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+               if (descriptor->type != GREYBUS_TYPE_STRING)
+                       continue;
+
+               desc_string = descriptor->data;
+               if (desc_string->id == string_id) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found)
+               return ERR_PTR(-ENOENT);
+
+       /* Allocate an extra byte so we can guarantee it's NUL-terminated */
+       string = kmemdup(&desc_string->string, desc_string->length + 1,
+                        GFP_KERNEL);
+       if (!string)
+               return ERR_PTR(-ENOMEM);
+       string[desc_string->length] = '\0';
+
+       /* Ok we've used this string, so we're done with it */
+       release_manifest_descriptor(descriptor);
+
+       return string;
+}
+
+/*
+ * Find cport descriptors in the manifest associated with the given
+ * bundle, and set up data structures for the functions that use
+ * them.  Returns the number of cports set up for the bundle, or 0
+ * if there is an error.
+ */
+static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
+{
+       struct gb_interface *intf = bundle->intf;
+       struct greybus_descriptor_cport *desc_cport;
+       struct manifest_desc *desc, *next, *tmp;
+       LIST_HEAD(list);
+       u8 bundle_id = bundle->id;
+       u16 cport_id;
+       u32 count = 0;
+       int i;
+
+       /* Set up all cport descriptors associated with this bundle */
+       list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
+               if (desc->type != GREYBUS_TYPE_CPORT)
+                       continue;
+
+               desc_cport = desc->data;
+               if (desc_cport->bundle != bundle_id)
+                       continue;
+
+               cport_id = le16_to_cpu(desc_cport->id);
+               if (cport_id > CPORT_ID_MAX)
+                       goto exit;
+
+               /* Nothing else should have its cport_id as control cport id */
+               if (cport_id == GB_CONTROL_CPORT_ID) {
+                       dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
+                               cport_id);
+                       goto exit;
+               }
+
+               /*
+                * Found one, move it to our temporary list after checking for
+                * duplicates.
+                */
+               list_for_each_entry(tmp, &list, links) {
+                       desc_cport = tmp->data;
+                       if (cport_id == le16_to_cpu(desc_cport->id)) {
+                               dev_err(&bundle->dev,
+                                       "duplicate CPort %u found\n", cport_id);
+                               goto exit;
+                       }
+               }
+               list_move_tail(&desc->links, &list);
+               count++;
+       }
+
+       if (!count)
+               return 0;
+
+       bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
+                                    GFP_KERNEL);
+       if (!bundle->cport_desc)
+               goto exit;
+
+       bundle->num_cports = count;
+
+       i = 0;
+       list_for_each_entry_safe(desc, next, &list, links) {
+               desc_cport = desc->data;
+               memcpy(&bundle->cport_desc[i++], desc_cport,
+                      sizeof(*desc_cport));
+
+               /* Release the cport descriptor */
+               release_manifest_descriptor(desc);
+       }
+
+       return count;
+exit:
+       release_cport_descriptors(&list, bundle_id);
+       /*
+        * Free all cports for this bundle to avoid 'excess descriptors'
+        * warnings.
+        */
+       release_cport_descriptors(&intf->manifest_descs, bundle_id);
+
+       return 0;       /* Error; count should also be 0 */
+}
+
+/*
+ * Find bundle descriptors in the manifest and set up their data
+ * structures.  Returns the number of bundles set up for the
+ * given interface.
+ */
+static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
+{
+       struct manifest_desc *desc;
+       struct gb_bundle *bundle;
+       struct gb_bundle *bundle_next;
+       u32 count = 0;
+       u8 bundle_id;
+       u8 class;
+
+       while ((desc = get_next_bundle_desc(intf))) {
+               struct greybus_descriptor_bundle *desc_bundle;
+
+               /* Found one.  Set up its bundle structure */
+               desc_bundle = desc->data;
+               bundle_id = desc_bundle->id;
+               class = desc_bundle->class;
+
+               /* Done with this bundle descriptor */
+               release_manifest_descriptor(desc);
+
+               /* Ignore any legacy control bundles */
+               if (bundle_id == GB_CONTROL_BUNDLE_ID) {
+                       dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
+                               __func__);
+                       release_cport_descriptors(&intf->manifest_descs,
+                                                 bundle_id);
+                       continue;
+               }
+
+               /* Nothing else should have its class set to control class */
+               if (class == GREYBUS_CLASS_CONTROL) {
+                       dev_err(&intf->dev,
+                               "bundle %u cannot use control class\n",
+                               bundle_id);
+                       goto cleanup;
+               }
+
+               bundle = gb_bundle_create(intf, bundle_id, class);
+               if (!bundle)
+                       goto cleanup;
+
+               /*
+                * Now go set up this bundle's functions and cports.
+                *
+                * A 'bundle' represents a device in greybus. It may require
+                * multiple cports to function. If we fail to set up any
+                * cport of a bundle, we had better reject the whole bundle,
+                * as the device may then not be able to function properly.
+                *
+                * But failing to set up a cport of bundle X doesn't mean
+                * that the device corresponding to bundle Y will not work
+                * properly. Bundles should be treated as separate,
+                * independent devices.
+                *
+                * So while parsing the manifest for an interface, treat
+                * bundles as separate entities and don't reject the entire
+                * interface and its other bundles when a cport fails to
+                * initialize. But make sure the bundle which needs that
+                * cport gets destroyed properly.
+                */
+               if (!gb_manifest_parse_cports(bundle)) {
+                       gb_bundle_destroy(bundle);
+                       continue;
+               }
+
+               count++;
+       }
+
+       return count;
+cleanup:
+       /* An error occurred; undo any changes we've made */
+       list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
+               gb_bundle_destroy(bundle);
+               count--;
+       }
+       return 0;       /* Error; count should also be 0 */
+}
+
+static bool gb_manifest_parse_interface(struct gb_interface *intf,
+                                       struct manifest_desc *interface_desc)
+{
+       struct greybus_descriptor_interface *desc_intf = interface_desc->data;
+       struct gb_control *control = intf->control;
+       char *str;
+
+       /* Handle the strings first--they can fail */
+       str = gb_string_get(intf, desc_intf->vendor_stringid);
+       if (IS_ERR(str))
+               return false;
+       control->vendor_string = str;
+
+       str = gb_string_get(intf, desc_intf->product_stringid);
+       if (IS_ERR(str))
+               goto out_free_vendor_string;
+       control->product_string = str;
+
+       /* Assign feature flags communicated via manifest */
+       intf->features = desc_intf->features;
+
+       /* Release the interface descriptor, now that we're done with it */
+       release_manifest_descriptor(interface_desc);
+
+       /* An interface must have at least one bundle descriptor */
+       if (!gb_manifest_parse_bundles(intf)) {
+               dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
+               goto out_err;
+       }
+
+       return true;
+out_err:
+       kfree(control->product_string);
+       control->product_string = NULL;
+out_free_vendor_string:
+       kfree(control->vendor_string);
+       control->vendor_string = NULL;
+
+       return false;
+}
+
+/*
+ * Parse a buffer containing an interface manifest.
+ *
+ * If we find anything wrong with the content/format of the buffer
+ * we reject it.
+ *
+ * The first requirement is that the manifest's version is
+ * one we can parse.
+ *
+ * We make an initial pass through the buffer and identify all of
+ * the descriptors it contains, keeping track of each one's type and
+ * the location and size of its data in the buffer.
+ *
+ * Next we scan the descriptors, looking for an interface descriptor;
+ * there must be exactly one of those.  When found, we record the
+ * information it contains, and then remove that descriptor (and any
+ * string descriptors it refers to) from further consideration.
+ *
+ * After that we look for the interface's bundles--there must be at
+ * least one of those.
+ *
+ * Returns true if parsing was successful, false otherwise.
+ */
+bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
+{
+       struct greybus_manifest *manifest;
+       struct greybus_manifest_header *header;
+       struct greybus_descriptor *desc;
+       struct manifest_desc *descriptor;
+       struct manifest_desc *interface_desc = NULL;
+       u16 manifest_size;
+       u32 found = 0;
+       bool result;
+
+       /* Manifest descriptor list should be empty here */
+       if (WARN_ON(!list_empty(&intf->manifest_descs)))
+               return false;
+
+       /* we have to have at _least_ the manifest header */
+       if (size < sizeof(*header)) {
+               dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
+                       size, sizeof(*header));
+               return false;
+       }
+
+       /* Make sure the size is right */
+       manifest = data;
+       header = &manifest->header;
+       manifest_size = le16_to_cpu(header->size);
+       if (manifest_size != size) {
+               dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
+                       size, manifest_size);
+               return false;
+       }
+
+       /* Validate major/minor number */
+       if (header->version_major > GREYBUS_VERSION_MAJOR) {
+               dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
+                       header->version_major, header->version_minor,
+                       GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
+               return false;
+       }
+
+       /* OK, find all the descriptors */
+       desc = manifest->descriptors;
+       size -= sizeof(*header);
+       while (size) {
+               int desc_size;
+
+               desc_size = identify_descriptor(intf, desc, size);
+               if (desc_size < 0) {
+                       result = false;
+                       goto out;
+               }
+               desc = (struct greybus_descriptor *)((char *)desc + desc_size);
+               size -= desc_size;
+       }
+
+       /* There must be a single interface descriptor */
+       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
+               if (descriptor->type == GREYBUS_TYPE_INTERFACE)
+                       if (!found++)
+                               interface_desc = descriptor;
+       }
+       if (found != 1) {
+               dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
+                       found);
+               result = false;
+               goto out;
+       }
+
+       /* Parse the manifest, starting with the interface descriptor */
+       result = gb_manifest_parse_interface(intf, interface_desc);
+
+       /*
+        * We really should have no remaining descriptors, but we
+        * don't know what newer format manifests might leave.
+        */
+       if (result && !list_empty(&intf->manifest_descs))
+               dev_info(&intf->dev, "excess descriptors in interface manifest\n");
+out:
+       release_manifest_descriptors(intf);
+
+       return result;
+}
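+
+/*
+ * Editor's sketch (hypothetical layout): the smallest manifest
+ * gb_manifest_parse() accepts is a header followed by exactly one
+ * interface descriptor, at least one non-control bundle descriptor
+ * (id != GB_CONTROL_BUNDLE_ID), and at least one cport descriptor
+ * bound to that bundle:
+ *
+ *   [manifest header][interface desc][bundle desc][cport desc]
+ *
+ * String descriptors are optional, since a string id of 0 means
+ * "no string".
+ */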
diff --git a/drivers/greybus/module.c b/drivers/greybus/module.c
new file mode 100644 (file)
index 0000000..36f77f9
--- /dev/null
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus Module code
+ *
+ * Copyright 2016 Google Inc.
+ * Copyright 2016 Linaro Ltd.
+ */
+
+#include <linux/greybus.h>
+#include "greybus_trace.h"
+
+static ssize_t eject_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t len)
+{
+       struct gb_module *module = to_gb_module(dev);
+       struct gb_interface *intf;
+       size_t i;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 0, &val);
+       if (ret)
+               return ret;
+
+       if (!val)
+               return len;
+
+       for (i = 0; i < module->num_interfaces; ++i) {
+               intf = module->interfaces[i];
+
+               mutex_lock(&intf->mutex);
+               /* Set flag to prevent concurrent activation. */
+               intf->ejected = true;
+               gb_interface_disable(intf);
+               gb_interface_deactivate(intf);
+               mutex_unlock(&intf->mutex);
+       }
+
+       /* Tell the SVC to eject the primary interface. */
+       ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
+       if (ret)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_WO(eject);
+
+static ssize_t module_id_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       return sprintf(buf, "%u\n", module->module_id);
+}
+static DEVICE_ATTR_RO(module_id);
+
+static ssize_t num_interfaces_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       return sprintf(buf, "%zu\n", module->num_interfaces);
+}
+static DEVICE_ATTR_RO(num_interfaces);
+
+static struct attribute *module_attrs[] = {
+       &dev_attr_eject.attr,
+       &dev_attr_module_id.attr,
+       &dev_attr_num_interfaces.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(module);
+
+static void gb_module_release(struct device *dev)
+{
+       struct gb_module *module = to_gb_module(dev);
+
+       trace_gb_module_release(module);
+
+       kfree(module);
+}
+
+struct device_type greybus_module_type = {
+       .name           = "greybus_module",
+       .release        = gb_module_release,
+};
+
+struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
+                                  size_t num_interfaces)
+{
+       struct gb_interface *intf;
+       struct gb_module *module;
+       int i;
+
+       module = kzalloc(struct_size(module, interfaces, num_interfaces),
+                        GFP_KERNEL);
+       if (!module)
+               return NULL;
+
+       module->hd = hd;
+       module->module_id = module_id;
+       module->num_interfaces = num_interfaces;
+
+       module->dev.parent = &hd->dev;
+       module->dev.bus = &greybus_bus_type;
+       module->dev.type = &greybus_module_type;
+       module->dev.groups = module_groups;
+       module->dev.dma_mask = hd->dev.dma_mask;
+       device_initialize(&module->dev);
+       dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
+
+       trace_gb_module_create(module);
+
+       for (i = 0; i < num_interfaces; ++i) {
+               intf = gb_interface_create(module, module_id + i);
+               if (!intf) {
+                       dev_err(&module->dev, "failed to create interface %u\n",
+                               module_id + i);
+                       goto err_put_interfaces;
+               }
+               module->interfaces[i] = intf;
+       }
+
+       return module;
+
+err_put_interfaces:
+       for (--i; i >= 0; --i)
+               gb_interface_put(module->interfaces[i]);
+
+       put_device(&module->dev);
+
+       return NULL;
+}
+
+/*
+ * Register and enable an interface after first attempting to activate it.
+ */
+static void gb_module_register_interface(struct gb_interface *intf)
+{
+       struct gb_module *module = intf->module;
+       u8 intf_id = intf->interface_id;
+       int ret;
+
+       mutex_lock(&intf->mutex);
+
+       ret = gb_interface_activate(intf);
+       if (ret) {
+               if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
+                       dev_err(&module->dev,
+                               "failed to activate interface %u: %d\n",
+                               intf_id, ret);
+               }
+
+               gb_interface_add(intf);
+               goto err_unlock;
+       }
+
+       ret = gb_interface_add(intf);
+       if (ret)
+               goto err_interface_deactivate;
+
+       ret = gb_interface_enable(intf);
+       if (ret) {
+               dev_err(&module->dev, "failed to enable interface %u: %d\n",
+                       intf_id, ret);
+               goto err_interface_deactivate;
+       }
+
+       mutex_unlock(&intf->mutex);
+
+       return;
+
+err_interface_deactivate:
+       gb_interface_deactivate(intf);
+err_unlock:
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_module_deregister_interface(struct gb_interface *intf)
+{
+       /* Mark as disconnected to prevent I/O during disable. */
+       if (intf->module->disconnected)
+               intf->disconnected = true;
+
+       mutex_lock(&intf->mutex);
+       intf->removed = true;
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+
+       gb_interface_del(intf);
+}
+
+/* Register a module and its interfaces. */
+int gb_module_add(struct gb_module *module)
+{
+       size_t i;
+       int ret;
+
+       ret = device_add(&module->dev);
+       if (ret) {
+               dev_err(&module->dev, "failed to register module: %d\n", ret);
+               return ret;
+       }
+
+       trace_gb_module_add(module);
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_module_register_interface(module->interfaces[i]);
+
+       return 0;
+}
+
+/* Deregister a module and its interfaces. */
+void gb_module_del(struct gb_module *module)
+{
+       size_t i;
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_module_deregister_interface(module->interfaces[i]);
+
+       trace_gb_module_del(module);
+
+       device_del(&module->dev);
+}
+
+void gb_module_put(struct gb_module *module)
+{
+       size_t i;
+
+       for (i = 0; i < module->num_interfaces; ++i)
+               gb_interface_put(module->interfaces[i]);
+
+       put_device(&module->dev);
+}
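+
+/*
+ * Lifecycle sketch (editor's addition, not compiled): how an SVC
+ * hotplug path might drive a module.  The wrapper function is
+ * hypothetical; the calls are the ones defined above.
+ */
+#if 0
+static void example_module_hotplug(struct gb_host_device *hd, u8 module_id)
+{
+       struct gb_module *module;
+
+       module = gb_module_create(hd, module_id, 1);    /* one interface */
+       if (!module)
+               return;
+
+       if (gb_module_add(module)) {    /* registers module and interfaces */
+               gb_module_put(module);
+               return;
+       }
+
+       /* ...later, on eject or unplug... */
+       gb_module_del(module);
+       gb_module_put(module);
+}
+#endif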
diff --git a/drivers/greybus/operation.c b/drivers/greybus/operation.c
new file mode 100644 (file)
index 0000000..8459e9b
--- /dev/null
@@ -0,0 +1,1264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Greybus operations
+ *
+ * Copyright 2014-2015 Google Inc.
+ * Copyright 2014-2015 Linaro Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#include "greybus_trace.h"
+
+static struct kmem_cache *gb_operation_cache;
+static struct kmem_cache *gb_message_cache;
+
+/* Workqueue to handle Greybus operation completions. */
+static struct workqueue_struct *gb_operation_completion_wq;
+
+/* Wait queue for synchronous cancellations. */
+static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
+
+/*
+ * Protects updates to operation->errno.
+ */
+static DEFINE_SPINLOCK(gb_operations_lock);
+
+static int gb_operation_response_send(struct gb_operation *operation,
+                                     int errno);
+
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       switch (connection->state) {
+       case GB_CONNECTION_STATE_ENABLED:
+               break;
+       case GB_CONNECTION_STATE_ENABLED_TX:
+               if (gb_operation_is_incoming(operation))
+                       goto err_unlock;
+               break;
+       case GB_CONNECTION_STATE_DISCONNECTING:
+               if (!gb_operation_is_core(operation))
+                       goto err_unlock;
+               break;
+       default:
+               goto err_unlock;
+       }
+
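+       /* The first get puts the operation on the connection's list. */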
+       if (operation->active++ == 0)
+               list_add_tail(&operation->links, &connection->operations);
+
+       trace_gb_operation_get_active(operation);
+
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return 0;
+
+err_unlock:
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return -ENOTCONN;
+}
+
+/* Caller holds operation reference. */
+static void gb_operation_put_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+
+       spin_lock_irqsave(&connection->lock, flags);
+
+       trace_gb_operation_put_active(operation);
+
+       if (--operation->active == 0) {
+               list_del(&operation->links);
+               if (atomic_read(&operation->waiters))
+                       wake_up(&gb_operation_cancellation_queue);
+       }
+       spin_unlock_irqrestore(&connection->lock, flags);
+}
+
+static bool gb_operation_is_active(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       unsigned long flags;
+       bool ret;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       ret = operation->active;
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return ret;
+}
+
+/*
+ * Set an operation's result.
+ *
+ * Initially an outgoing operation's errno value is -EBADR.
+ * If no error occurs before sending the request message the only
+ * valid value operation->errno can be set to is -EINPROGRESS,
+ * indicating the request has been (or rather is about to be) sent.
+ * At that point nobody should be looking at the result until the
+ * response arrives.
+ *
+ * The first time the result gets set after the request has been
+ * sent, that result "sticks."  That is, if two concurrent threads
+ * race to set the result, the first one wins.  The return value
+ * tells the caller whether its result was recorded; if not the
+ * caller has nothing more to do.
+ *
+ * The result value -EILSEQ is reserved to signal an implementation
+ * error; if it's ever observed, the code performing the request has
+ * done something fundamentally wrong.  It is an error to try to set
+ * the result to -EBADR; attempts to do so trigger a warning, and
+ * -EILSEQ is recorded instead.  Similarly, the only valid result
+ * value to set for an operation in the initial state is -EINPROGRESS.
+ * Attempts to do otherwise will also record a (successful) -EILSEQ
+ * operation result.
+ */
+static bool gb_operation_result_set(struct gb_operation *operation, int result)
+{
+       unsigned long flags;
+       int prev;
+
+       if (result == -EINPROGRESS) {
+               /*
+                * -EINPROGRESS is used to indicate the request is
+                * in flight.  It should be the first result value
+                * set after the initial -EBADR.  Issue a warning
+                * and record an implementation error if it's
+                * set at any other time.
+                */
+               spin_lock_irqsave(&gb_operations_lock, flags);
+               prev = operation->errno;
+               if (prev == -EBADR)
+                       operation->errno = result;
+               else
+                       operation->errno = -EILSEQ;
+               spin_unlock_irqrestore(&gb_operations_lock, flags);
+               WARN_ON(prev != -EBADR);
+
+               return true;
+       }
+
+       /*
+        * The first result value set after a request has been sent
+        * will be the final result of the operation.  Subsequent
+        * attempts to set the result are ignored.
+        *
+        * Note that -EBADR is a reserved "initial state" result
+        * value.  Attempts to set this value result in a warning,
+        * and the result code is set to -EILSEQ instead.
+        */
+       if (WARN_ON(result == -EBADR))
+               result = -EILSEQ; /* Nobody should be setting -EBADR */
+
+       spin_lock_irqsave(&gb_operations_lock, flags);
+       prev = operation->errno;
+       if (prev == -EINPROGRESS)
+               operation->errno = result;      /* First and final result */
+       spin_unlock_irqrestore(&gb_operations_lock, flags);
+
+       return prev == -EINPROGRESS;
+}
+
+int gb_operation_result(struct gb_operation *operation)
+{
+       int result = operation->errno;
+
+       WARN_ON(result == -EBADR);
+       WARN_ON(result == -EINPROGRESS);
+
+       return result;
+}
+EXPORT_SYMBOL_GPL(gb_operation_result);
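+
+/*
+ * State sketch (editor's note): the errno life cycle enforced above is
+ *
+ *   -EBADR (allocated) -> -EINPROGRESS (request sent) -> final result
+ *
+ * Only the first final result sticks; later attempts are ignored.
+ * Setting -EINPROGRESS out of sequence, or -EBADR at any time,
+ * records -EILSEQ instead.
+ */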
+
+/*
+ * Looks up an outgoing operation on a connection and returns a refcounted
+ * pointer if found, or NULL otherwise.
+ */
+static struct gb_operation *
+gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
+{
+       struct gb_operation *operation;
+       unsigned long flags;
+       bool found = false;
+
+       spin_lock_irqsave(&connection->lock, flags);
+       list_for_each_entry(operation, &connection->operations, links)
+               if (operation->id == operation_id &&
+                   !gb_operation_is_incoming(operation)) {
+                       gb_operation_get(operation);
+                       found = true;
+                       break;
+               }
+       spin_unlock_irqrestore(&connection->lock, flags);
+
+       return found ? operation : NULL;
+}
+
+static int gb_message_send(struct gb_message *message, gfp_t gfp)
+{
+       struct gb_connection *connection = message->operation->connection;
+
+       trace_gb_message_send(message);
+       return connection->hd->driver->message_send(connection->hd,
+                                       connection->hd_cport_id,
+                                       message,
+                                       gfp);
+}
+
+/*
+ * Cancel a message we have passed to the host device layer to be sent.
+ */
+static void gb_message_cancel(struct gb_message *message)
+{
+       struct gb_host_device *hd = message->operation->connection->hd;
+
+       hd->driver->message_cancel(message);
+}
+
+static void gb_operation_request_handle(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       int status;
+       int ret;
+
+       if (connection->handler) {
+               status = connection->handler(operation);
+       } else {
+               dev_err(&connection->hd->dev,
+                       "%s: unexpected incoming request of type 0x%02x\n",
+                       connection->name, operation->type);
+
+               status = -EPROTONOSUPPORT;
+       }
+
+       ret = gb_operation_response_send(operation, status);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: failed to send response %d for type 0x%02x: %d\n",
+                       connection->name, status, operation->type, ret);
+               return;
+       }
+}
+
+/*
+ * Process operation work.
+ *
+ * For incoming requests, call the protocol request handler. The operation
+ * result should be -EINPROGRESS at this point.
+ *
+ * For outgoing requests, the operation result value should have
+ * been set before this work was queued.  The operation callback function
+ * allows the original requester to know the request has completed
+ * and its result is available.
+ */
+static void gb_operation_work(struct work_struct *work)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       operation = container_of(work, struct gb_operation, work);
+
+       if (gb_operation_is_incoming(operation)) {
+               gb_operation_request_handle(operation);
+       } else {
+               ret = del_timer_sync(&operation->timer);
+               if (!ret) {
+                       /* Cancel request message if scheduled by timeout. */
+                       if (gb_operation_result(operation) == -ETIMEDOUT)
+                               gb_message_cancel(operation->request);
+               }
+
+               operation->callback(operation);
+       }
+
+       gb_operation_put_active(operation);
+       gb_operation_put(operation);
+}
+
+static void gb_operation_timeout(struct timer_list *t)
+{
+       struct gb_operation *operation = from_timer(operation, t, timer);
+
+       if (gb_operation_result_set(operation, -ETIMEDOUT)) {
+               /*
+                * A stuck request message will be cancelled from the
+                * workqueue.
+                */
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+}
+
+static void gb_operation_message_init(struct gb_host_device *hd,
+                                     struct gb_message *message,
+                                     u16 operation_id,
+                                     size_t payload_size, u8 type)
+{
+       struct gb_operation_msg_hdr *header;
+
+       header = message->buffer;
+
+       message->header = header;
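+       /* A non-empty payload starts immediately after the header. */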
+       message->payload = payload_size ? header + 1 : NULL;
+       message->payload_size = payload_size;
+
+       /*
+        * The type supplied for incoming message buffers will be
+        * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
+        * arriving data so there's no need to initialize the message header.
+        */
+       if (type != GB_REQUEST_TYPE_INVALID) {
+               u16 message_size = (u16)(sizeof(*header) + payload_size);
+
+               /*
+                * For a request, the operation id gets filled in
+                * when the message is sent.  For a response, it
+                * will be copied from the request by the caller.
+                *
+                * The result field in a request message must be
+                * zero.  It will be set just prior to sending for
+                * a response.
+                */
+               header->size = cpu_to_le16(message_size);
+               header->operation_id = 0;
+               header->type = type;
+               header->result = 0;
+       }
+}
+
+/*
+ * Allocate a message to be used for an operation request or response.
+ * Both types of message contain a common header.  The request message
+ * for an outgoing operation is outbound, as is the response message
+ * for an incoming operation.  The message header for an outbound
+ * message is partially initialized here.
+ *
+ * The headers for inbound messages don't need to be initialized;
+ * they'll be filled in by arriving data.
+ *
+ * Our message buffers have the following layout:
+ *     message header  \_ these combined are
+ *     message payload /  the message size
+ */
+static struct gb_message *
+gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
+                          size_t payload_size, gfp_t gfp_flags)
+{
+       struct gb_message *message;
+       struct gb_operation_msg_hdr *header;
+       size_t message_size = payload_size + sizeof(*header);
+
+       if (message_size > hd->buffer_size_max) {
+               dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
+                        message_size, hd->buffer_size_max);
+               return NULL;
+       }
+
+       /* Allocate the message structure and buffer. */
+       message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
+       if (!message)
+               return NULL;
+
+       message->buffer = kzalloc(message_size, gfp_flags);
+       if (!message->buffer)
+               goto err_free_message;
+
+       /* Initialize the message.  Operation id is filled in later. */
+       gb_operation_message_init(hd, message, 0, payload_size, type);
+
+       return message;
+
+err_free_message:
+       kmem_cache_free(gb_message_cache, message);
+
+       return NULL;
+}
+
+static void gb_operation_message_free(struct gb_message *message)
+{
+       kfree(message->buffer);
+       kmem_cache_free(gb_message_cache, message);
+}
+
+/*
+ * Map an enum gb_operation_status value (which is represented in a
+ * message as a single byte) to an appropriate Linux negative errno.
+ */
+static int gb_operation_status_map(u8 status)
+{
+       switch (status) {
+       case GB_OP_SUCCESS:
+               return 0;
+       case GB_OP_INTERRUPTED:
+               return -EINTR;
+       case GB_OP_TIMEOUT:
+               return -ETIMEDOUT;
+       case GB_OP_NO_MEMORY:
+               return -ENOMEM;
+       case GB_OP_PROTOCOL_BAD:
+               return -EPROTONOSUPPORT;
+       case GB_OP_OVERFLOW:
+               return -EMSGSIZE;
+       case GB_OP_INVALID:
+               return -EINVAL;
+       case GB_OP_RETRY:
+               return -EAGAIN;
+       case GB_OP_NONEXISTENT:
+               return -ENODEV;
+       case GB_OP_MALFUNCTION:
+               return -EILSEQ;
+       case GB_OP_UNKNOWN_ERROR:
+       default:
+               return -EIO;
+       }
+}
+
+/*
+ * Map a Linux errno value (from operation->errno) into the value
+ * that should represent it in a response message status sent
+ * over the wire.  Returns an enum gb_operation_status value (which
+ * is represented in a message as a single byte).
+ */
+static u8 gb_operation_errno_map(int errno)
+{
+       switch (errno) {
+       case 0:
+               return GB_OP_SUCCESS;
+       case -EINTR:
+               return GB_OP_INTERRUPTED;
+       case -ETIMEDOUT:
+               return GB_OP_TIMEOUT;
+       case -ENOMEM:
+               return GB_OP_NO_MEMORY;
+       case -EPROTONOSUPPORT:
+               return GB_OP_PROTOCOL_BAD;
+       case -EMSGSIZE:
+               return GB_OP_OVERFLOW;  /* Could be underflow too */
+       case -EINVAL:
+               return GB_OP_INVALID;
+       case -EAGAIN:
+               return GB_OP_RETRY;
+       case -EILSEQ:
+               return GB_OP_MALFUNCTION;
+       case -ENODEV:
+               return GB_OP_NONEXISTENT;
+       case -EIO:
+       default:
+               return GB_OP_UNKNOWN_ERROR;
+       }
+}
+
+bool gb_operation_response_alloc(struct gb_operation *operation,
+                                size_t response_size, gfp_t gfp)
+{
+       struct gb_host_device *hd = operation->connection->hd;
+       struct gb_operation_msg_hdr *request_header;
+       struct gb_message *response;
+       u8 type;
+
+       type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
+       response = gb_operation_message_alloc(hd, type, response_size, gfp);
+       if (!response)
+               return false;
+       response->operation = operation;
+
+       /*
+        * Size and type get initialized when the message is
+        * allocated.  The errno will be set before sending.  All
+        * that's left is the operation id, which we copy from the
+        * request message header (as-is, in little-endian order).
+        */
+       request_header = operation->request->header;
+       response->header->operation_id = request_header->operation_id;
+       operation->response = response;
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
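+
+/*
+ * Usage sketch (illustrative, not from a real protocol driver): an
+ * incoming-request handler that returns payload allocates its response
+ * before filling it in.  The response structure is hypothetical.
+ *
+ *     static int gb_example_request(struct gb_operation *op)
+ *     {
+ *             struct gb_example_response *response;
+ *
+ *             if (!gb_operation_response_alloc(op, sizeof(*response),
+ *                                              GFP_KERNEL))
+ *                     return -ENOMEM;
+ *
+ *             response = op->response->payload;
+ *             response->value = cpu_to_le32(42);
+ *
+ *             return 0;
+ *     }
+ */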
+
+/*
+ * Create a Greybus operation to be sent over the given connection.
+ * The request buffer will be big enough for a payload of the given
+ * size.
+ *
+ * For outgoing requests, the request message's header will be
+ * initialized with the type of the request and the message size.
+ * Outgoing operations must also specify the response buffer size,
+ * which must be sufficient to hold all expected response data.  The
+ * response message header will eventually be overwritten, so there's
+ * no need to initialize it here.
+ *
+ * Request messages for incoming operations can arrive in interrupt
+ * context, so they must be allocated with GFP_ATOMIC.  In this case
+ * the request buffer will be immediately overwritten, so there is
+ * no need to initialize the message header.  Responsibility for
+ * allocating a response buffer lies with the incoming request
+ * handler for a protocol.  So we don't allocate that here.
+ *
+ * Returns a pointer to the new operation or a null pointer if an
+ * error occurs.
+ */
+static struct gb_operation *
+gb_operation_create_common(struct gb_connection *connection, u8 type,
+                          size_t request_size, size_t response_size,
+                          unsigned long op_flags, gfp_t gfp_flags)
+{
+       struct gb_host_device *hd = connection->hd;
+       struct gb_operation *operation;
+
+       operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
+       if (!operation)
+               return NULL;
+       operation->connection = connection;
+
+       operation->request = gb_operation_message_alloc(hd, type, request_size,
+                                                       gfp_flags);
+       if (!operation->request)
+               goto err_cache;
+       operation->request->operation = operation;
+
+       /* Allocate the response buffer for outgoing operations */
+       if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
+               if (!gb_operation_response_alloc(operation, response_size,
+                                                gfp_flags)) {
+                       goto err_request;
+               }
+
+               timer_setup(&operation->timer, gb_operation_timeout, 0);
+       }
+
+       operation->flags = op_flags;
+       operation->type = type;
+       operation->errno = -EBADR;  /* Initial value--means "never set" */
+
+       INIT_WORK(&operation->work, gb_operation_work);
+       init_completion(&operation->completion);
+       kref_init(&operation->kref);
+       atomic_set(&operation->waiters, 0);
+
+       return operation;
+
+err_request:
+       gb_operation_message_free(operation->request);
+err_cache:
+       kmem_cache_free(gb_operation_cache, operation);
+
+       return NULL;
+}
+
+/*
+ * Create a new operation associated with the given connection.  The
+ * request and response sizes provided are the number of bytes
+ * required to hold the request/response payload only.  Both of
+ * these are allowed to be 0.  Note that 0x00 is reserved as an
+ * invalid operation type for all protocols, and this is enforced
+ * here.
+ */
+struct gb_operation *
+gb_operation_create_flags(struct gb_connection *connection,
+                         u8 type, size_t request_size,
+                         size_t response_size, unsigned long flags,
+                         gfp_t gfp)
+{
+       struct gb_operation *operation;
+
+       if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
+               return NULL;
+       if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
+               type &= ~GB_MESSAGE_TYPE_RESPONSE;
+
+       if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
+               flags &= GB_OPERATION_FLAG_USER_MASK;
+
+       operation = gb_operation_create_common(connection, type,
+                                              request_size, response_size,
+                                              flags, gfp);
+       if (operation)
+               trace_gb_operation_create(operation);
+
+       return operation;
+}
+EXPORT_SYMBOL_GPL(gb_operation_create_flags);
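+
+/*
+ * Usage sketch (illustrative): most drivers create operations through
+ * the gb_operation_create() helper, which calls this function with no
+ * flags set.  The request type and structures here are hypothetical.
+ *
+ *     operation = gb_operation_create(connection, GB_EXAMPLE_TYPE_GET,
+ *                                     sizeof(struct gb_example_request),
+ *                                     sizeof(struct gb_example_response),
+ *                                     GFP_KERNEL);
+ *     if (!operation)
+ *             return -ENOMEM;
+ */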
+
+struct gb_operation *
+gb_operation_create_core(struct gb_connection *connection,
+                        u8 type, size_t request_size,
+                        size_t response_size, unsigned long flags,
+                        gfp_t gfp)
+{
+       struct gb_operation *operation;
+
+       flags |= GB_OPERATION_FLAG_CORE;
+
+       operation = gb_operation_create_common(connection, type,
+                                              request_size, response_size,
+                                              flags, gfp);
+       if (operation)
+               trace_gb_operation_create_core(operation);
+
+       return operation;
+}
+
+/*
+ * Do not export this function: gb_operation_create_core() is meant
+ * only for internal (greybus core) callers.
+ */
+
+size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
+{
+       struct gb_host_device *hd = connection->hd;
+
+       return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
+
+static struct gb_operation *
+gb_operation_create_incoming(struct gb_connection *connection, u16 id,
+                            u8 type, void *data, size_t size)
+{
+       struct gb_operation *operation;
+       size_t request_size;
+       unsigned long flags = GB_OPERATION_FLAG_INCOMING;
+
+       /* Caller has made sure we at least have a message header. */
+       request_size = size - sizeof(struct gb_operation_msg_hdr);
+
+       if (!id)
+               flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
+
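+       /*
+        * No response is allocated here for incoming operations, so the
+        * response_size argument below is unused; GB_REQUEST_TYPE_INVALID
+        * (i.e. 0) merely serves as a placeholder.
+        */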
+       operation = gb_operation_create_common(connection, type,
+                                              request_size,
+                                              GB_REQUEST_TYPE_INVALID,
+                                              flags, GFP_ATOMIC);
+       if (!operation)
+               return NULL;
+
+       operation->id = id;
+       memcpy(operation->request->header, data, size);
+       trace_gb_operation_create_incoming(operation);
+
+       return operation;
+}
+
+/*
+ * Get an additional reference on an operation.
+ */
+void gb_operation_get(struct gb_operation *operation)
+{
+       kref_get(&operation->kref);
+}
+EXPORT_SYMBOL_GPL(gb_operation_get);
+
+/*
+ * Destroy a previously created operation.
+ */
+static void _gb_operation_destroy(struct kref *kref)
+{
+       struct gb_operation *operation;
+
+       operation = container_of(kref, struct gb_operation, kref);
+
+       trace_gb_operation_destroy(operation);
+
+       if (operation->response)
+               gb_operation_message_free(operation->response);
+       gb_operation_message_free(operation->request);
+
+       kmem_cache_free(gb_operation_cache, operation);
+}
+
+/*
+ * Drop a reference on an operation, and destroy it when the last
+ * one is gone.
+ */
+void gb_operation_put(struct gb_operation *operation)
+{
+       if (WARN_ON(!operation))
+               return;
+
+       kref_put(&operation->kref, _gb_operation_destroy);
+}
+EXPORT_SYMBOL_GPL(gb_operation_put);
+
+/* Tell the requester we're done */
+static void gb_operation_sync_callback(struct gb_operation *operation)
+{
+       complete(&operation->completion);
+}
+
+/**
+ * gb_operation_request_send() - send an operation request message
+ * @operation: the operation to initiate
+ * @callback:  the operation completion callback
+ * @timeout:   operation timeout in milliseconds, or zero for no timeout
+ * @gfp:       the memory flags to use for any allocations
+ *
+ * The caller has filled in any payload so the request message is ready to go.
+ * The callback function supplied will be called when the response message has
+ * arrived, a unidirectional request has been sent, or the operation is
+ * cancelled, indicating that the operation is complete. The callback function
+ * can fetch the result of the operation using gb_operation_result() if
+ * desired.
+ *
+ * Return: 0 if the request was successfully queued in the host-driver queues,
+ * or a negative errno.
+ */
+int gb_operation_request_send(struct gb_operation *operation,
+                             gb_operation_callback callback,
+                             unsigned int timeout,
+                             gfp_t gfp)
+{
+       struct gb_connection *connection = operation->connection;
+       struct gb_operation_msg_hdr *header;
+       unsigned int cycle;
+       int ret;
+
+       if (gb_connection_is_offloaded(connection))
+               return -EBUSY;
+
+       if (!callback)
+               return -EINVAL;
+
+       /*
+        * Record the callback function, which is executed in
+        * non-atomic (workqueue) context when the final result
+        * of an operation has been set.
+        */
+       operation->callback = callback;
+
+       /*
+        * Assign the operation's id, and store it in the request header.
+        * Zero is a reserved operation id for unidirectional operations.
+        */
+       if (gb_operation_is_unidirectional(operation)) {
+               operation->id = 0;
+       } else {
+               cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
+               operation->id = (u16)(cycle % U16_MAX + 1);
+       }
+
+       header = operation->request->header;
+       header->operation_id = cpu_to_le16(operation->id);
+
+       gb_operation_result_set(operation, -EINPROGRESS);
+
+       /*
+        * Get an extra reference on the operation. It'll be dropped when the
+        * operation completes.
+        */
+       gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
+       if (ret)
+               goto err_put;
+
+       ret = gb_message_send(operation->request, gfp);
+       if (ret)
+               goto err_put_active;
+
+       if (timeout) {
+               operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
+               add_timer(&operation->timer);
+       }
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send);
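+
+/*
+ * Usage sketch (illustrative): an asynchronous requester supplies a
+ * completion callback, reads the result there, and drops the reference
+ * it got when creating the operation.  gb_example_callback() is a
+ * hypothetical name.
+ *
+ *     static void gb_example_callback(struct gb_operation *operation)
+ *     {
+ *             int ret = gb_operation_result(operation);
+ *
+ *             if (ret)
+ *                     dev_err(&operation->connection->hd->dev,
+ *                             "example request failed: %d\n", ret);
+ *
+ *             gb_operation_put(operation);
+ *     }
+ *
+ *     ret = gb_operation_request_send(operation, gb_example_callback,
+ *                                     GB_OPERATION_TIMEOUT_DEFAULT,
+ *                                     GFP_KERNEL);
+ */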
+
+/*
+ * Send a synchronous operation.  This function is expected to
+ * block, returning only when the response has arrived (or when an
+ * error is detected).  The return value is the result of the
+ * operation.
+ */
+int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
+                                          unsigned int timeout)
+{
+       int ret;
+
+       ret = gb_operation_request_send(operation, gb_operation_sync_callback,
+                                       timeout, GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       ret = wait_for_completion_interruptible(&operation->completion);
+       if (ret < 0) {
+               /* Cancel the operation if interrupted */
+               gb_operation_cancel(operation, -ECANCELED);
+       }
+
+       return gb_operation_result(operation);
+}
+EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
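+
+/*
+ * Callers that are happy with the default timeout can use the
+ * gb_operation_request_send_sync() wrapper from the operation header,
+ * which passes GB_OPERATION_TIMEOUT_DEFAULT to this function:
+ *
+ *     ret = gb_operation_request_send_sync(operation);
+ */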
+
+/*
+ * Send a response for an incoming operation request.  A non-zero
+ * errno indicates a failed operation.
+ *
+ * If there is any response payload, the incoming request handler is
+ * responsible for allocating the response message.  Otherwise it
+ * can simply supply the result errno; this function will
+ * allocate the response message if necessary.
+ */
+static int gb_operation_response_send(struct gb_operation *operation,
+                                     int errno)
+{
+       struct gb_connection *connection = operation->connection;
+       int ret;
+
+       if (!operation->response &&
+           !gb_operation_is_unidirectional(operation)) {
+               if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
+                       return -ENOMEM;
+       }
+
+       /* Record the result */
+       if (!gb_operation_result_set(operation, errno)) {
+               dev_err(&connection->hd->dev, "request result already set\n");
+               return -EIO;    /* Shouldn't happen */
+       }
+
+       /* Sender of request does not care about response. */
+       if (gb_operation_is_unidirectional(operation))
+               return 0;
+
+       /* Reference will be dropped when message has been sent. */
+       gb_operation_get(operation);
+       ret = gb_operation_get_active(operation);
+       if (ret)
+               goto err_put;
+
+       /* Fill in the response header and send it */
+       operation->response->header->result = gb_operation_errno_map(errno);
+
+       ret = gb_message_send(operation->response, GFP_KERNEL);
+       if (ret)
+               goto err_put_active;
+
+       return 0;
+
+err_put_active:
+       gb_operation_put_active(operation);
+err_put:
+       gb_operation_put(operation);
+
+       return ret;
+}
+
+/*
+ * This function is called when a message send request has completed.
+ */
+void greybus_message_sent(struct gb_host_device *hd,
+                         struct gb_message *message, int status)
+{
+       struct gb_operation *operation = message->operation;
+       struct gb_connection *connection = operation->connection;
+
+       /*
+        * If the message was a response, we just need to drop our
+        * reference to the operation.  If an error occurred, report
+        * it.
+        *
+ * For requests, if there's no error and the operation is not
+ * unidirectional, there's nothing more to do until the response
+ * arrives. If an error occurred attempting to send it, or if the
+ * operation is unidirectional, record the result of the operation and
+        * schedule its completion.
+        */
+       if (message == operation->response) {
+               if (status) {
+                       dev_err(&connection->hd->dev,
+                               "%s: error sending response 0x%02x: %d\n",
+                               connection->name, operation->type, status);
+               }
+
+               gb_operation_put_active(operation);
+               gb_operation_put(operation);
+       } else if (status || gb_operation_is_unidirectional(operation)) {
+               if (gb_operation_result_set(operation, status)) {
+                       queue_work(gb_operation_completion_wq,
+                                  &operation->work);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(greybus_message_sent);
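+
+/*
+ * Usage sketch (illustrative): a host driver reports transmit completion
+ * from its own completion handler, for example a USB urb callback; the
+ * names below are hypothetical.
+ *
+ *     static void example_out_callback(struct urb *urb)
+ *     {
+ *             struct gb_message *message = urb->context;
+ *
+ *             greybus_message_sent(message->operation->connection->hd,
+ *                                  message, urb->status);
+ *     }
+ */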
+
+/*
+ * We've received data on a connection, and it doesn't look like a
+ * response, so we assume it's a request.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the request buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_request(struct gb_connection *connection,
+                               const struct gb_operation_msg_hdr *header,
+                               void *data, size_t size)
+{
+       struct gb_operation *operation;
+       u16 operation_id;
+       u8 type;
+       int ret;
+
+       operation_id = le16_to_cpu(header->operation_id);
+       type = header->type;
+
+       operation = gb_operation_create_incoming(connection, operation_id,
+                                                type, data, size);
+       if (!operation) {
+               dev_err(&connection->hd->dev,
+                       "%s: can't create incoming operation\n",
+                       connection->name);
+               return;
+       }
+
+       ret = gb_operation_get_active(operation);
+       if (ret) {
+               gb_operation_put(operation);
+               return;
+       }
+       trace_gb_message_recv_request(operation->request);
+
+       /*
+        * The initial reference to the operation will be dropped when the
+        * request handler returns.
+        */
+       if (gb_operation_result_set(operation, -EINPROGRESS))
+               queue_work(connection->wq, &operation->work);
+}
+
+/*
+ * We've received data that appears to be an operation response
+ * message.  Look up the operation, and record that we've received
+ * its response.
+ *
+ * This is called in interrupt context, so just copy the incoming
+ * data into the response buffer and handle the rest via workqueue.
+ */
+static void gb_connection_recv_response(struct gb_connection *connection,
+                               const struct gb_operation_msg_hdr *header,
+                               void *data, size_t size)
+{
+       struct gb_operation *operation;
+       struct gb_message *message;
+       size_t message_size;
+       u16 operation_id;
+       int errno;
+
+       operation_id = le16_to_cpu(header->operation_id);
+
+       if (!operation_id) {
+               dev_err_ratelimited(&connection->hd->dev,
+                                   "%s: invalid response id 0 received\n",
+                                   connection->name);
+               return;
+       }
+
+       operation = gb_operation_find_outgoing(connection, operation_id);
+       if (!operation) {
+               dev_err_ratelimited(&connection->hd->dev,
+                                   "%s: unexpected response id 0x%04x received\n",
+                                   connection->name, operation_id);
+               return;
+       }
+
+       errno = gb_operation_status_map(header->result);
+       message = operation->response;
+       message_size = sizeof(*header) + message->payload_size;
+       if (!errno && size > message_size) {
+               dev_err_ratelimited(&connection->hd->dev,
+                                   "%s: malformed response 0x%02x received (%zu > %zu)\n",
+                                   connection->name, header->type,
+                                   size, message_size);
+               errno = -EMSGSIZE;
+       } else if (!errno && size < message_size) {
+               if (gb_operation_short_response_allowed(operation)) {
+                       message->payload_size = size - sizeof(*header);
+               } else {
+                       dev_err_ratelimited(&connection->hd->dev,
+                                           "%s: short response 0x%02x received (%zu < %zu)\n",
+                                           connection->name, header->type,
+                                           size, message_size);
+                       errno = -EMSGSIZE;
+               }
+       }
+
+       /* We must ignore the payload if a bad status is returned */
+       if (errno)
+               size = sizeof(*header);
+
+       /* The rest will be handled in work queue context */
+       if (gb_operation_result_set(operation, errno)) {
+               memcpy(message->buffer, data, size);
+
+               trace_gb_message_recv_response(message);
+
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+
+       gb_operation_put(operation);
+}
+
+/*
+ * Handle data arriving on a connection.  As soon as we return, the
+ * supplied data buffer will be reused (so unless we do something
+ * with it, it's effectively dropped).
+ */
+void gb_connection_recv(struct gb_connection *connection,
+                       void *data, size_t size)
+{
+       struct gb_operation_msg_hdr header;
+       struct device *dev = &connection->hd->dev;
+       size_t msg_size;
+
+       if (connection->state == GB_CONNECTION_STATE_DISABLED ||
+           gb_connection_is_offloaded(connection)) {
+               dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
+                                    connection->name, size);
+               return;
+       }
+
+       if (size < sizeof(header)) {
+               dev_err_ratelimited(dev, "%s: short message received\n",
+                                   connection->name);
+               return;
+       }
+
+       /* Use memcpy as data may be unaligned */
+       memcpy(&header, data, sizeof(header));
+       msg_size = le16_to_cpu(header.size);
+       if (size < msg_size) {
+               dev_err_ratelimited(dev,
+                                   "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
+                                   connection->name,
+                                   le16_to_cpu(header.operation_id),
+                                   header.type, size, msg_size);
+               return;         /* XXX Should still complete operation */
+       }
+
+       if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
+               gb_connection_recv_response(connection, &header, data,
+                                           msg_size);
+       } else {
+               gb_connection_recv_request(connection, &header, data,
+                                          msg_size);
+       }
+}
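+
+/*
+ * Host drivers do not normally call this directly: they hand raw cport
+ * data to greybus_data_rcvd(), which looks up the connection for the
+ * cport and then calls gb_connection_recv().  Sketch (names assumed):
+ *
+ *     greybus_data_rcvd(hd, cport_id, data, length);
+ */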
+
+/*
+ * Cancel an outgoing operation synchronously, and record the given error to
+ * indicate why.
+ */
+void gb_operation_cancel(struct gb_operation *operation, int errno)
+{
+       if (WARN_ON(gb_operation_is_incoming(operation)))
+               return;
+
+       if (gb_operation_result_set(operation, errno)) {
+               gb_message_cancel(operation->request);
+               queue_work(gb_operation_completion_wq, &operation->work);
+       }
+       trace_gb_message_cancel_outgoing(operation->request);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                  !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
+}
+EXPORT_SYMBOL_GPL(gb_operation_cancel);
+
+/*
+ * Cancel an incoming operation synchronously. Called during connection tear
+ * down.
+ */
+void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
+{
+       if (WARN_ON(!gb_operation_is_incoming(operation)))
+               return;
+
+       if (!gb_operation_is_unidirectional(operation)) {
+               /*
+                * Make sure the request handler has submitted the response
+                * before cancelling it.
+                */
+               flush_work(&operation->work);
+               if (!gb_operation_result_set(operation, errno))
+                       gb_message_cancel(operation->response);
+       }
+       trace_gb_message_cancel_incoming(operation->response);
+
+       atomic_inc(&operation->waiters);
+       wait_event(gb_operation_cancellation_queue,
+                  !gb_operation_is_active(operation));
+       atomic_dec(&operation->waiters);
+}
+
+/**
+ * gb_operation_sync_timeout() - implement a "simple" synchronous operation
+ * @connection: the Greybus connection to send this to
+ * @type: the type of operation to send
+ * @request: pointer to a memory buffer to copy the request from
+ * @request_size: size of @request
+ * @response: pointer to a memory buffer to copy the response to
+ * @response_size: size of @response
+ * @timeout: operation timeout in milliseconds
+ *
+ * This function implements a simple synchronous Greybus operation.  It sends
+ * the provided operation request and waits (sleeps) until the corresponding
+ * operation response message has been successfully received, or an error
+ * occurs.  @request and @response are buffers to hold the request and response
+ * data respectively, and if they are not NULL, their size must be specified in
+ * @request_size and @response_size.
+ *
+ * If a response payload is expected and @response is not NULL,
+ * @response_size bytes will be copied into @response when the
+ * operation succeeds.
+ *
+ * If there is an error, the response buffer is left alone.
+ */
+int gb_operation_sync_timeout(struct gb_connection *connection, int type,
+                             void *request, int request_size,
+                             void *response, int response_size,
+                             unsigned int timeout)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       if ((response_size && !response) ||
+           (request_size && !request))
+               return -EINVAL;
+
+       operation = gb_operation_create(connection, type,
+                                       request_size, response_size,
+                                       GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
+                       connection->name, operation->id, type, ret);
+       } else {
+               if (response_size) {
+                       memcpy(response, operation->response->payload,
+                              response_size);
+               }
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
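+
+/*
+ * Most callers use the gb_operation_sync() wrapper, which invokes this
+ * function with GB_OPERATION_TIMEOUT_DEFAULT.  See the SVC helpers in
+ * svc.c (e.g. gb_svc_intf_device_id()) for typical request/response
+ * usage.
+ */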
+
+/**
+ * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
+ * @connection:                connection to use
+ * @type:              type of operation to send
+ * @request:           memory buffer to copy the request from
+ * @request_size:      size of @request
+ * @timeout:           send timeout in milliseconds
+ *
+ * Initiate a unidirectional operation by sending a request message and
+ * waiting for it to be acknowledged as sent by the host device.
+ *
+ * Note that successful send of a unidirectional operation does not imply that
+ * the request has actually reached the remote end of the connection.
+ */
+int gb_operation_unidirectional_timeout(struct gb_connection *connection,
+                                       int type, void *request,
+                                       int request_size,
+                                       unsigned int timeout)
+{
+       struct gb_operation *operation;
+       int ret;
+
+       if (request_size && !request)
+               return -EINVAL;
+
+       operation = gb_operation_create_flags(connection, type,
+                                             request_size, 0,
+                                             GB_OPERATION_FLAG_UNIDIRECTIONAL,
+                                             GFP_KERNEL);
+       if (!operation)
+               return -ENOMEM;
+
+       if (request_size)
+               memcpy(operation->request->payload, request, request_size);
+
+       ret = gb_operation_request_send_sync_timeout(operation, timeout);
+       if (ret) {
+               dev_err(&connection->hd->dev,
+                       "%s: unidirectional operation of type 0x%02x failed: %d\n",
+                       connection->name, type, ret);
+       }
+
+       gb_operation_put(operation);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
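+
+/*
+ * Usage sketch (illustrative): fire-and-forget notifications typically
+ * go through the gb_operation_unidirectional() wrapper, which supplies
+ * the default timeout.  The request type and payload are hypothetical.
+ *
+ *     ret = gb_operation_unidirectional(connection, GB_EXAMPLE_TYPE_EVENT,
+ *                                       &event, sizeof(event));
+ */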
+
+int __init gb_operation_init(void)
+{
+       gb_message_cache = kmem_cache_create("gb_message_cache",
+                                            sizeof(struct gb_message), 0, 0,
+                                            NULL);
+       if (!gb_message_cache)
+               return -ENOMEM;
+
+       gb_operation_cache = kmem_cache_create("gb_operation_cache",
+                                              sizeof(struct gb_operation), 0,
+                                              0, NULL);
+       if (!gb_operation_cache)
+               goto err_destroy_message_cache;
+
+       gb_operation_completion_wq = alloc_workqueue("greybus_completion",
+                                                    0, 0);
+       if (!gb_operation_completion_wq)
+               goto err_destroy_operation_cache;
+
+       return 0;
+
+err_destroy_operation_cache:
+       kmem_cache_destroy(gb_operation_cache);
+       gb_operation_cache = NULL;
+err_destroy_message_cache:
+       kmem_cache_destroy(gb_message_cache);
+       gb_message_cache = NULL;
+
+       return -ENOMEM;
+}
+
+void gb_operation_exit(void)
+{
+       destroy_workqueue(gb_operation_completion_wq);
+       gb_operation_completion_wq = NULL;
+       kmem_cache_destroy(gb_operation_cache);
+       gb_operation_cache = NULL;
+       kmem_cache_destroy(gb_message_cache);
+       gb_message_cache = NULL;
+}
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
new file mode 100644 (file)
index 0000000..ce7740e
--- /dev/null
@@ -0,0 +1,1397 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SVC Greybus driver.
+ *
+ * Copyright 2015 Google Inc.
+ * Copyright 2015 Linaro Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
+#define SVC_INTF_EJECT_TIMEOUT         9000
+#define SVC_INTF_ACTIVATE_TIMEOUT      6000
+#define SVC_INTF_RESUME_TIMEOUT                3000
+
+struct gb_svc_deferred_request {
+       struct work_struct work;
+       struct gb_operation *operation;
+};
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation);
+
+static ssize_t endo_id_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "0x%04x\n", svc->endo_id);
+}
+static DEVICE_ATTR_RO(endo_id);
+
+static ssize_t ap_intf_id_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "%u\n", svc->ap_intf_id);
+}
+static DEVICE_ATTR_RO(ap_intf_id);
+
+// FIXME
+// This is a hack, we need to do this "right" and clean the interface up
+// properly, not just forcibly yank the thing out of the system and hope for the
+// best.  But for now, people want their modules to come out without having to
+// throw the thing to the ground or get out a screwdriver.
+static ssize_t intf_eject_store(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+       unsigned short intf_id;
+       int ret;
+
+       ret = kstrtou16(buf, 10, &intf_id);
+       if (ret < 0)
+               return ret;
+
+       dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
+
+       ret = gb_svc_intf_eject(svc, intf_id);
+       if (ret < 0)
+               return ret;
+
+       return len;
+}
+static DEVICE_ATTR_WO(intf_eject);
+
+static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       return sprintf(buf, "%s\n",
+                      gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+}
+
+static ssize_t watchdog_store(struct device *dev,
+                             struct device_attribute *attr, const char *buf,
+                             size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+       int retval;
+       bool user_request;
+
+       retval = strtobool(buf, &user_request);
+       if (retval)
+               return retval;
+
+       if (user_request)
+               retval = gb_svc_watchdog_enable(svc);
+       else
+               retval = gb_svc_watchdog_disable(svc);
+       if (retval)
+               return retval;
+       return len;
+}
+static DEVICE_ATTR_RW(watchdog);
+
+static ssize_t watchdog_action_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
+               return sprintf(buf, "panic\n");
+       else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
+               return sprintf(buf, "reset\n");
+
+       return -EINVAL;
+}
+
+static ssize_t watchdog_action_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t len)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (sysfs_streq(buf, "panic"))
+               svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
+       else if (sysfs_streq(buf, "reset"))
+               svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
+       else
+               return -EINVAL;
+
+       return len;
+}
+static DEVICE_ATTR_RW(watchdog_action);
+
+static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
+{
+       struct gb_svc_pwrmon_rail_count_get_response response;
+       int ret;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
+               return ret;
+       }
+
+       *value = response.rail_count;
+
+       return 0;
+}
+
+static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
+               struct gb_svc_pwrmon_rail_names_get_response *response,
+               size_t bufsize)
+{
+       int ret;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
+                               response, bufsize);
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
+               return ret;
+       }
+
+       if (response->status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev,
+                       "SVC error while getting rail names: %u\n",
+                       response->status);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
+static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
+                                   u8 measurement_type, u32 *value)
+{
+       struct gb_svc_pwrmon_sample_get_request request;
+       struct gb_svc_pwrmon_sample_get_response response;
+       int ret;
+
+       request.rail_id = rail_id;
+       request.measurement_type = measurement_type;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
+               return ret;
+       }
+
+       if (response.result) {
+               dev_err(&svc->dev,
+                       "UniPro error while getting rail power sample (%d %d): %d\n",
+                       rail_id, measurement_type, response.result);
+               switch (response.result) {
+               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+                       return -EINVAL;
+               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+                       return -ENOMSG;
+               default:
+                       return -EREMOTEIO;
+               }
+       }
+
+       *value = le32_to_cpu(response.measurement);
+
+       return 0;
+}
+
+int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
+                                 u8 measurement_type, u32 *value)
+{
+       struct gb_svc_pwrmon_intf_sample_get_request request;
+       struct gb_svc_pwrmon_intf_sample_get_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.measurement_type = measurement_type;
+
+       ret = gb_operation_sync(svc->connection,
+                               GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
+               return ret;
+       }
+
+       if (response.result) {
+               dev_err(&svc->dev,
+                       "UniPro error while getting intf power sample (%d %d): %d\n",
+                       intf_id, measurement_type, response.result);
+               switch (response.result) {
+               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
+                       return -EINVAL;
+               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
+                       return -ENOMSG;
+               default:
+                       return -EREMOTEIO;
+               }
+       }
+
+       *value = le32_to_cpu(response.measurement);
+
+       return 0;
+}
+
+static struct attribute *svc_attrs[] = {
+       &dev_attr_endo_id.attr,
+       &dev_attr_ap_intf_id.attr,
+       &dev_attr_intf_eject.attr,
+       &dev_attr_watchdog.attr,
+       &dev_attr_watchdog_action.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(svc);
+
+int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
+{
+       struct gb_svc_intf_device_id_request request;
+
+       request.intf_id = intf_id;
+       request.device_id = device_id;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
+                                &request, sizeof(request), NULL, 0);
+}
+
+int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_eject_request request;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       /*
+        * The pulse width for module release in the SVC is long, so we
+        * need to increase the timeout so that the operation does not
+        * return too soon.
+        */
+       ret = gb_operation_sync_timeout(svc->connection,
+                                       GB_SVC_TYPE_INTF_EJECT, &request,
+                                       sizeof(request), NULL, 0,
+                                       SVC_INTF_EJECT_TIMEOUT);
+       if (ret) {
+               dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
+               return ret;
+       }
+
+       return 0;
+}
+
+int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_vsys_request request;
+       struct gb_svc_intf_vsys_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_VSYS_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
+int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_refclk_request request;
+       struct gb_svc_intf_refclk_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_REFCLK_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
+int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
+{
+       struct gb_svc_intf_unipro_request request;
+       struct gb_svc_intf_unipro_response response;
+       int type, ret;
+
+       request.intf_id = intf_id;
+
+       if (enable)
+               type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
+       else
+               type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
+
+       ret = gb_operation_sync(svc->connection, type,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+       if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
+               return -EREMOTEIO;
+       return 0;
+}
+
+int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
+{
+       struct gb_svc_intf_activate_request request;
+       struct gb_svc_intf_activate_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       ret = gb_operation_sync_timeout(svc->connection,
+                                       GB_SVC_TYPE_INTF_ACTIVATE,
+                                       &request, sizeof(request),
+                                       &response, sizeof(response),
+                                       SVC_INTF_ACTIVATE_TIMEOUT);
+       if (ret < 0)
+               return ret;
+       if (response.status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev, "failed to activate interface %u: %u\n",
+                       intf_id, response.status);
+               return -EREMOTEIO;
+       }
+
+       *intf_type = response.intf_type;
+
+       return 0;
+}
+
+int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_resume_request request;
+       struct gb_svc_intf_resume_response response;
+       int ret;
+
+       request.intf_id = intf_id;
+
+       ret = gb_operation_sync_timeout(svc->connection,
+                                       GB_SVC_TYPE_INTF_RESUME,
+                                       &request, sizeof(request),
+                                       &response, sizeof(response),
+                                       SVC_INTF_RESUME_TIMEOUT);
+       if (ret < 0) {
+               dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
+                       intf_id, ret);
+               return ret;
+       }
+
+       if (response.status != GB_SVC_OP_SUCCESS) {
+               dev_err(&svc->dev, "failed to resume interface %u: %u\n",
+                       intf_id, response.status);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
+int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 *value)
+{
+       struct gb_svc_dme_peer_get_request request;
+       struct gb_svc_dme_peer_get_response response;
+       u16 result;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.attr = cpu_to_le16(attr);
+       request.selector = cpu_to_le16(selector);
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
+                       intf_id, attr, selector, ret);
+               return ret;
+       }
+
+       result = le16_to_cpu(response.result_code);
+       if (result) {
+               dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
+                       intf_id, attr, selector, result);
+               return -EREMOTEIO;
+       }
+
+       if (value)
+               *value = le32_to_cpu(response.attr_value);
+
+       return 0;
+}
+
+int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
+                       u32 value)
+{
+       struct gb_svc_dme_peer_set_request request;
+       struct gb_svc_dme_peer_set_response response;
+       u16 result;
+       int ret;
+
+       request.intf_id = intf_id;
+       request.attr = cpu_to_le16(attr);
+       request.selector = cpu_to_le16(selector);
+       request.value = cpu_to_le32(value);
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret) {
+               dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
+                       intf_id, attr, selector, value, ret);
+               return ret;
+       }
+
+       result = le16_to_cpu(response.result_code);
+       if (result) {
+               dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
+                       intf_id, attr, selector, value, result);
+               return -EREMOTEIO;
+       }
+
+       return 0;
+}
+
+int gb_svc_connection_create(struct gb_svc *svc,
+                            u8 intf1_id, u16 cport1_id,
+                            u8 intf2_id, u16 cport2_id,
+                            u8 cport_flags)
+{
+       struct gb_svc_conn_create_request request;
+
+       request.intf1_id = intf1_id;
+       request.cport1_id = cpu_to_le16(cport1_id);
+       request.intf2_id = intf2_id;
+       request.cport2_id = cpu_to_le16(cport2_id);
+       request.tc = 0;         /* TC0 */
+       request.flags = cport_flags;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
+                              u8 intf2_id, u16 cport2_id)
+{
+       struct gb_svc_conn_destroy_request request;
+       struct gb_connection *connection = svc->connection;
+       int ret;
+
+       request.intf1_id = intf1_id;
+       request.cport1_id = cpu_to_le16(cport1_id);
+       request.intf2_id = intf2_id;
+       request.cport2_id = cpu_to_le16(cport2_id);
+
+       ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
+                               &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
+                       intf1_id, cport1_id, intf2_id, cport2_id, ret);
+       }
+}
+
+/* Creates bi-directional routes between the devices */
+int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
+                       u8 intf2_id, u8 dev2_id)
+{
+       struct gb_svc_route_create_request request;
+
+       request.intf1_id = intf1_id;
+       request.dev1_id = dev1_id;
+       request.intf2_id = intf2_id;
+       request.dev2_id = dev2_id;
+
+       return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
+                                &request, sizeof(request), NULL, 0);
+}
+
+/* Destroys bi-directional routes between the devices */
+void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
+{
+       struct gb_svc_route_destroy_request request;
+       int ret;
+
+       request.intf1_id = intf1_id;
+       request.intf2_id = intf2_id;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
+                               &request, sizeof(request), NULL, 0);
+       if (ret) {
+               dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
+                       intf1_id, intf2_id, ret);
+       }
+}
+
+int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
+                              u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
+                              u8 tx_amplitude, u8 tx_hs_equalizer,
+                              u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
+                              u8 flags, u32 quirks,
+                              struct gb_svc_l2_timer_cfg *local,
+                              struct gb_svc_l2_timer_cfg *remote)
+{
+       struct gb_svc_intf_set_pwrm_request request;
+       struct gb_svc_intf_set_pwrm_response response;
+       int ret;
+       u16 result_code;
+
+       memset(&request, 0, sizeof(request));
+
+       request.intf_id = intf_id;
+       request.hs_series = hs_series;
+       request.tx_mode = tx_mode;
+       request.tx_gear = tx_gear;
+       request.tx_nlanes = tx_nlanes;
+       request.tx_amplitude = tx_amplitude;
+       request.tx_hs_equalizer = tx_hs_equalizer;
+       request.rx_mode = rx_mode;
+       request.rx_gear = rx_gear;
+       request.rx_nlanes = rx_nlanes;
+       request.flags = flags;
+       request.quirks = cpu_to_le32(quirks);
+       if (local)
+               request.local_l2timerdata = *local;
+       if (remote)
+               request.remote_l2timerdata = *remote;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0)
+               return ret;
+
+       result_code = response.result_code;
+       if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
+               dev_err(&svc->dev, "set power mode = %d\n", result_code);
+               return -EIO;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
+
+int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
+{
+       struct gb_svc_intf_set_pwrm_request request;
+       struct gb_svc_intf_set_pwrm_response response;
+       int ret;
+       u16 result_code;
+
+       memset(&request, 0, sizeof(request));
+
+       request.intf_id = intf_id;
+       request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
+       request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+       request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
+
+       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
+                               &request, sizeof(request),
+                               &response, sizeof(response));
+       if (ret < 0) {
+               dev_err(&svc->dev,
+                       "failed to send set power mode operation to interface %u: %d\n",
+                       intf_id, ret);
+               return ret;
+       }
+
+       result_code = response.result_code;
+       if (result_code != GB_SVC_SETPWRM_PWR_OK) {
+               dev_err(&svc->dev,
+                       "failed to hibernate the link for interface %u: %u\n",
+                       intf_id, result_code);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int gb_svc_ping(struct gb_svc *svc)
+{
+       return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
+                                        NULL, 0, NULL, 0,
+                                        GB_OPERATION_TIMEOUT_DEFAULT * 2);
+}
+
+static int gb_svc_version_request(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_svc_version_request *request;
+       struct gb_svc_version_response *response;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_err(&svc->dev, "short version request (%zu < %zu)\n",
+                       op->request->payload_size,
+                       sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       if (request->major > GB_SVC_VERSION_MAJOR) {
+               dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
+                        request->major, GB_SVC_VERSION_MAJOR);
+               return -ENOTSUPP;
+       }
+
+       svc->protocol_major = request->major;
+       svc->protocol_minor = request->minor;
+
+       if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
+               return -ENOMEM;
+
+       response = op->response->payload;
+       response->major = svc->protocol_major;
+       response->minor = svc->protocol_minor;
+
+       return 0;
+}
+
+static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
+                                       size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
+               file_inode(file)->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_VOL, &value);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "failed to get voltage sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
+                                       size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
+               file_inode(file)->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_CURR, &value);
+       if (ret) {
+               dev_err(&svc->dev,
+                       "failed to get current sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
+                                     size_t len, loff_t *offset)
+{
+       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
+               file_inode(file)->i_private;
+       struct gb_svc *svc = pwrmon_rails->svc;
+       int ret, desc;
+       u32 value;
+       char buff[16];
+
+       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
+                                      GB_SVC_PWRMON_TYPE_PWR, &value);
+       if (ret) {
+               dev_err(&svc->dev, "failed to get power sample %u: %d\n",
+                       pwrmon_rails->id, ret);
+               return ret;
+       }
+
+       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
+
+       return simple_read_from_buffer(buf, len, offset, buff, desc);
+}
+
+static const struct file_operations pwrmon_debugfs_voltage_fops = {
+       .read           = pwr_debugfs_voltage_read,
+};
+
+static const struct file_operations pwrmon_debugfs_current_fops = {
+       .read           = pwr_debugfs_current_read,
+};
+
+static const struct file_operations pwrmon_debugfs_power_fops = {
+       .read           = pwr_debugfs_power_read,
+};
+
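+/*
+ * Expose one debugfs directory per power rail reported by the SVC.  The
+ * resulting layout (names illustrative) is:
+ *
+ *     <debugfs>/greybus/<bus>-svc/pwrmon/<rail>/voltage_now
+ *                                              /current_now
+ *                                              /power_now
+ */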
+static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
+{
+       int i;
+       size_t bufsize;
+       struct dentry *dent;
+       struct gb_svc_pwrmon_rail_names_get_response *rail_names;
+       u8 rail_count;
+
+       dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
+       if (IS_ERR_OR_NULL(dent))
+               return;
+
+       if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
+               goto err_pwrmon_debugfs;
+
+       if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
+               goto err_pwrmon_debugfs;
+
+       bufsize = sizeof(*rail_names) +
+               GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
+
+       rail_names = kzalloc(bufsize, GFP_KERNEL);
+       if (!rail_names)
+               goto err_pwrmon_debugfs;
+
+       svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
+                                   GFP_KERNEL);
+       if (!svc->pwrmon_rails)
+               goto err_pwrmon_debugfs_free;
+
+       if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
+               goto err_pwrmon_debugfs_free;
+
+       for (i = 0; i < rail_count; i++) {
+               struct dentry *dir;
+               struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
+               char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
+
+               snprintf(fname, sizeof(fname), "%s",
+                        (char *)&rail_names->name[i]);
+
+               rail->id = i;
+               rail->svc = svc;
+
+               dir = debugfs_create_dir(fname, dent);
+               debugfs_create_file("voltage_now", 0444, dir, rail,
+                                   &pwrmon_debugfs_voltage_fops);
+               debugfs_create_file("current_now", 0444, dir, rail,
+                                   &pwrmon_debugfs_current_fops);
+               debugfs_create_file("power_now", 0444, dir, rail,
+                                   &pwrmon_debugfs_power_fops);
+       }
+
+       kfree(rail_names);
+       return;
+
+err_pwrmon_debugfs_free:
+       kfree(rail_names);
+       kfree(svc->pwrmon_rails);
+       svc->pwrmon_rails = NULL;
+
+err_pwrmon_debugfs:
+       debugfs_remove(dent);
+}
+
+static void gb_svc_debugfs_init(struct gb_svc *svc)
+{
+       svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
+                                                gb_debugfs_get());
+       gb_svc_pwrmon_debugfs_init(svc);
+}
+
+static void gb_svc_debugfs_exit(struct gb_svc *svc)
+{
+       debugfs_remove_recursive(svc->debugfs_dentry);
+       kfree(svc->pwrmon_rails);
+       svc->pwrmon_rails = NULL;
+}
+
+static int gb_svc_hello(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_svc_hello_request *hello_request;
+       int ret;
+
+       if (op->request->payload_size < sizeof(*hello_request)) {
+               dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
+                        op->request->payload_size,
+                        sizeof(*hello_request));
+               return -EINVAL;
+       }
+
+       hello_request = op->request->payload;
+       svc->endo_id = le16_to_cpu(hello_request->endo_id);
+       svc->ap_intf_id = hello_request->interface_id;
+
+       ret = device_add(&svc->dev);
+       if (ret) {
+               dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
+               return ret;
+       }
+
+       ret = gb_svc_watchdog_create(svc);
+       if (ret) {
+               dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
+               goto err_unregister_device;
+       }
+
+       gb_svc_debugfs_init(svc);
+
+       return gb_svc_queue_deferred_request(op);
+
+err_unregister_device:
+       gb_svc_watchdog_destroy(svc);
+       device_del(&svc->dev);
+       return ret;
+}
+
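+/*
+ * A module's interface ids are consecutive, starting at the module id
+ * (which is the id of its primary interface), so a per-module range
+ * check suffices to find the owning module.
+ */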
+static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
+                                                   u8 intf_id)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+       size_t num_interfaces;
+       u8 module_id;
+
+       list_for_each_entry(module, &hd->modules, hd_node) {
+               module_id = module->module_id;
+               num_interfaces = module->num_interfaces;
+
+               if (intf_id >= module_id &&
+                   intf_id < module_id + num_interfaces) {
+                       return module->interfaces[intf_id - module_id];
+               }
+       }
+
+       return NULL;
+}
+
+static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+
+       list_for_each_entry(module, &hd->modules, hd_node) {
+               if (module->module_id == module_id)
+                       return module;
+       }
+
+       return NULL;
+}
+
+static void gb_svc_process_hello_deferred(struct gb_operation *operation)
+{
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       int ret;
+
+       /*
+        * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
+        * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
+        * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
+        * module.
+        *
+        * The code should be removed once SW-2217 ("Heuristic for UniPro
+        * Power Mode Changes") is resolved.
+        */
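+       /*
+        * Gear 2, one lane per direction, small TX amplitude with no
+        * de-emphasis, and no flags, quirks or L2-timer overrides.
+        */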
+       ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
+                                        GB_SVC_UNIPRO_HS_SERIES_A,
+                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                        2, 1,
+                                        GB_SVC_SMALL_AMPLITUDE,
+                                        GB_SVC_NO_DE_EMPHASIS,
+                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
+                                        2, 1,
+                                        0, 0,
+                                        NULL, NULL);
+
+       if (ret)
+               dev_warn(&svc->dev,
+                        "power mode change failed on AP to switch link: %d\n",
+                        ret);
+}
+
+static void gb_svc_process_module_inserted(struct gb_operation *operation)
+{
+       struct gb_svc_module_inserted_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module;
+       size_t num_interfaces;
+       u8 module_id;
+       u16 flags;
+       int ret;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       module_id = request->primary_intf_id;
+       num_interfaces = request->intf_count;
+       flags = le16_to_cpu(request->flags);
+
+       dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
+               __func__, module_id, num_interfaces, flags);
+
+       if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
+               dev_warn(&svc->dev, "no primary interface detected on module %u\n",
+                        module_id);
+       }
+
+       module = gb_svc_module_lookup(svc, module_id);
+       if (module) {
+               dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
+                        module_id);
+               return;
+       }
+
+       module = gb_module_create(hd, module_id, num_interfaces);
+       if (!module) {
+               dev_err(&svc->dev, "failed to create module\n");
+               return;
+       }
+
+       ret = gb_module_add(module);
+       if (ret) {
+               gb_module_put(module);
+               return;
+       }
+
+       list_add(&module->hd_node, &hd->modules);
+}
+
+static void gb_svc_process_module_removed(struct gb_operation *operation)
+{
+       struct gb_svc_module_removed_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_module *module;
+       u8 module_id;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       module_id = request->primary_intf_id;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
+
+       module = gb_svc_module_lookup(svc, module_id);
+       if (!module) {
+               dev_warn(&svc->dev, "unexpected module-removed event %u\n",
+                        module_id);
+               return;
+       }
+
+       module->disconnected = true;
+
+       gb_module_del(module);
+       list_del(&module->hd_node);
+       gb_module_put(module);
+}
+
+static void gb_svc_process_intf_oops(struct gb_operation *operation)
+{
+       struct gb_svc_intf_oops_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_interface *intf;
+       u8 intf_id;
+       u8 reason;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       intf_id = request->intf_id;
+       reason = request->reason;
+
+       intf = gb_svc_interface_lookup(svc, intf_id);
+       if (!intf) {
+               dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
+                        intf_id);
+               return;
+       }
+
+       dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
+                intf_id, reason);
+
+       mutex_lock(&intf->mutex);
+       intf->disconnected = true;
+       gb_interface_disable(intf);
+       gb_interface_deactivate(intf);
+       mutex_unlock(&intf->mutex);
+}
+
+static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
+{
+       struct gb_svc_intf_mailbox_event_request *request;
+       struct gb_connection *connection = operation->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       struct gb_interface *intf;
+       u8 intf_id;
+       u16 result_code;
+       u32 mailbox;
+
+       /* The request message size has already been verified. */
+       request = operation->request->payload;
+       intf_id = request->intf_id;
+       result_code = le16_to_cpu(request->result_code);
+       mailbox = le32_to_cpu(request->mailbox);
+
+       dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
+               __func__, intf_id, result_code, mailbox);
+
+       intf = gb_svc_interface_lookup(svc, intf_id);
+       if (!intf) {
+               dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
+               return;
+       }
+
+       gb_interface_mailbox_event(intf, result_code, mailbox);
+}
+
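+/*
+ * Most SVC requests are processed in two steps: the request handler only
+ * validates the message and queues this work, as the real processing may
+ * itself send operations over the SVC connection and therefore must not
+ * run from the incoming-request context.
+ */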
+static void gb_svc_process_deferred_request(struct work_struct *work)
+{
+       struct gb_svc_deferred_request *dr;
+       struct gb_operation *operation;
+       struct gb_svc *svc;
+       u8 type;
+
+       dr = container_of(work, struct gb_svc_deferred_request, work);
+       operation = dr->operation;
+       svc = gb_connection_get_data(operation->connection);
+       type = operation->request->header->type;
+
+       switch (type) {
+       case GB_SVC_TYPE_SVC_HELLO:
+               gb_svc_process_hello_deferred(operation);
+               break;
+       case GB_SVC_TYPE_MODULE_INSERTED:
+               gb_svc_process_module_inserted(operation);
+               break;
+       case GB_SVC_TYPE_MODULE_REMOVED:
+               gb_svc_process_module_removed(operation);
+               break;
+       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+               gb_svc_process_intf_mailbox_event(operation);
+               break;
+       case GB_SVC_TYPE_INTF_OOPS:
+               gb_svc_process_intf_oops(operation);
+               break;
+       default:
+               dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
+       }
+
+       gb_operation_put(operation);
+       kfree(dr);
+}
+
+static int gb_svc_queue_deferred_request(struct gb_operation *operation)
+{
+       struct gb_svc *svc = gb_connection_get_data(operation->connection);
+       struct gb_svc_deferred_request *dr;
+
+       dr = kmalloc(sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       gb_operation_get(operation);
+
+       dr->operation = operation;
+       INIT_WORK(&dr->work, gb_svc_process_deferred_request);
+
+       queue_work(svc->wq, &dr->work);
+
+       return 0;
+}
+
+static int gb_svc_intf_reset_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_message *request = op->request;
+       struct gb_svc_intf_reset_request *reset;
+
+       if (request->payload_size < sizeof(*reset)) {
+               dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
+                        request->payload_size, sizeof(*reset));
+               return -EINVAL;
+       }
+       reset = request->payload;
+
+       /* FIXME Reset the interface here */
+
+       return 0;
+}
+
+static int gb_svc_module_inserted_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_module_inserted_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
+                        op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+               request->primary_intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_module_removed_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_module_removed_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
+                        op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
+               request->primary_intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_oops_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_intf_oops_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
+                        op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
+{
+       struct gb_svc *svc = gb_connection_get_data(op->connection);
+       struct gb_svc_intf_mailbox_event_request *request;
+
+       if (op->request->payload_size < sizeof(*request)) {
+               dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
+                        op->request->payload_size, sizeof(*request));
+               return -EINVAL;
+       }
+
+       request = op->request->payload;
+
+       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
+
+       return gb_svc_queue_deferred_request(op);
+}
+
+static int gb_svc_request_handler(struct gb_operation *op)
+{
+       struct gb_connection *connection = op->connection;
+       struct gb_svc *svc = gb_connection_get_data(connection);
+       u8 type = op->type;
+       int ret = 0;
+
+       /*
+        * SVC requests need to follow a specific order (at least initially),
+        * and the code below enforces that. The expected order is:
+        * - PROTOCOL_VERSION
+        * - SVC_HELLO
+        * - Any other request, but only after the two above.
+        *
+        * Incoming requests are guaranteed to be serialized and so we don't
+        * need to protect 'state' for any races.
+        */
+       switch (type) {
+       case GB_SVC_TYPE_PROTOCOL_VERSION:
+               if (svc->state != GB_SVC_STATE_RESET)
+                       ret = -EINVAL;
+               break;
+       case GB_SVC_TYPE_SVC_HELLO:
+               if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
+                       ret = -EINVAL;
+               break;
+       default:
+               if (svc->state != GB_SVC_STATE_SVC_HELLO)
+                       ret = -EINVAL;
+               break;
+       }
+
+       if (ret) {
+               dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
+                        type, svc->state);
+               return ret;
+       }
+
+       switch (type) {
+       case GB_SVC_TYPE_PROTOCOL_VERSION:
+               ret = gb_svc_version_request(op);
+               if (!ret)
+                       svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
+               return ret;
+       case GB_SVC_TYPE_SVC_HELLO:
+               ret = gb_svc_hello(op);
+               if (!ret)
+                       svc->state = GB_SVC_STATE_SVC_HELLO;
+               return ret;
+       case GB_SVC_TYPE_INTF_RESET:
+               return gb_svc_intf_reset_recv(op);
+       case GB_SVC_TYPE_MODULE_INSERTED:
+               return gb_svc_module_inserted_recv(op);
+       case GB_SVC_TYPE_MODULE_REMOVED:
+               return gb_svc_module_removed_recv(op);
+       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
+               return gb_svc_intf_mailbox_event_recv(op);
+       case GB_SVC_TYPE_INTF_OOPS:
+               return gb_svc_intf_oops_recv(op);
+       default:
+               dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
+               return -EINVAL;
+       }
+}
+
+static void gb_svc_release(struct device *dev)
+{
+       struct gb_svc *svc = to_gb_svc(dev);
+
+       if (svc->connection)
+               gb_connection_destroy(svc->connection);
+       ida_destroy(&svc->device_id_map);
+       destroy_workqueue(svc->wq);
+       kfree(svc);
+}
+
+struct device_type greybus_svc_type = {
+       .name           = "greybus_svc",
+       .release        = gb_svc_release,
+};
+
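+/*
+ * Allocate and initialise the SVC device and its static connection.  Note
+ * that the device itself is only registered later, from gb_svc_hello(),
+ * once the SVC has identified itself.
+ */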
+struct gb_svc *gb_svc_create(struct gb_host_device *hd)
+{
+       struct gb_svc *svc;
+
+       svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+       if (!svc)
+               return NULL;
+
+       svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
+       if (!svc->wq) {
+               kfree(svc);
+               return NULL;
+       }
+
+       svc->dev.parent = &hd->dev;
+       svc->dev.bus = &greybus_bus_type;
+       svc->dev.type = &greybus_svc_type;
+       svc->dev.groups = svc_groups;
+       svc->dev.dma_mask = svc->dev.parent->dma_mask;
+       device_initialize(&svc->dev);
+
+       dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
+
+       ida_init(&svc->device_id_map);
+       svc->state = GB_SVC_STATE_RESET;
+       svc->hd = hd;
+
+       svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
+                                                     gb_svc_request_handler);
+       if (IS_ERR(svc->connection)) {
+               dev_err(&svc->dev, "failed to create connection: %ld\n",
+                       PTR_ERR(svc->connection));
+               goto err_put_device;
+       }
+
+       gb_connection_set_data(svc->connection, svc);
+
+       return svc;
+
+err_put_device:
+       put_device(&svc->dev);
+       return NULL;
+}
+
+int gb_svc_add(struct gb_svc *svc)
+{
+       int ret;
+
+       /*
+        * The SVC protocol is currently driven by the SVC, so the SVC device
+        * is added from the connection request handler when enough
+        * information has been received.
+        */
+       ret = gb_connection_enable(svc->connection);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void gb_svc_remove_modules(struct gb_svc *svc)
+{
+       struct gb_host_device *hd = svc->hd;
+       struct gb_module *module, *tmp;
+
+       list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
+               gb_module_del(module);
+               list_del(&module->hd_node);
+               gb_module_put(module);
+       }
+}
+
+void gb_svc_del(struct gb_svc *svc)
+{
+       gb_connection_disable_rx(svc->connection);
+
+       /*
+        * The SVC device may have been registered from the request handler.
+        */
+       if (device_is_registered(&svc->dev)) {
+               gb_svc_debugfs_exit(svc);
+               gb_svc_watchdog_destroy(svc);
+               device_del(&svc->dev);
+       }
+
+       flush_workqueue(svc->wq);
+
+       gb_svc_remove_modules(svc);
+
+       gb_connection_disable(svc->connection);
+}
+
+void gb_svc_put(struct gb_svc *svc)
+{
+       put_device(&svc->dev);
+}
diff --git a/drivers/greybus/svc_watchdog.c b/drivers/greybus/svc_watchdog.c
new file mode 100644 (file)
index 0000000..b6b1682
--- /dev/null
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SVC Greybus "watchdog" driver.
+ *
+ * Copyright 2016 Google Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/greybus.h>
+
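+/* How often to ping the SVC */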
+#define SVC_WATCHDOG_PERIOD    (2 * HZ)
+
+struct gb_svc_watchdog {
+       struct delayed_work     work;
+       struct gb_svc           *svc;
+       bool                    enabled;
+       struct notifier_block   pm_notifier;
+};
+
+static struct delayed_work reset_work;
+
+static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
+                                   unsigned long pm_event, void *unused)
+{
+       struct gb_svc_watchdog *watchdog =
+               container_of(notifier, struct gb_svc_watchdog, pm_notifier);
+
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+               gb_svc_watchdog_disable(watchdog->svc);
+               break;
+       case PM_POST_SUSPEND:
+               gb_svc_watchdog_enable(watchdog->svc);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
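+/*
+ * Last-resort recovery: invoke an Android-style init helper
+ * ("/system/bin/start unipro_reset"), which is expected to restart the
+ * UniPro network from userspace.
+ */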
+static void greybus_reset(struct work_struct *work)
+{
+       static char const start_path[] = "/system/bin/start";
+       static char *envp[] = {
+               "HOME=/",
+               "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
+               NULL,
+       };
+       static char *argv[] = {
+               (char *)start_path,
+               "unipro_reset",
+               NULL,
+       };
+
+       pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
+              argv[0], argv[1]);
+       call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
+}
+
+static void do_work(struct work_struct *work)
+{
+       struct gb_svc_watchdog *watchdog;
+       struct gb_svc *svc;
+       int retval;
+
+       watchdog = container_of(work, struct gb_svc_watchdog, work.work);
+       svc = watchdog->svc;
+
+       dev_dbg(&svc->dev, "%s: ping.\n", __func__);
+       retval = gb_svc_ping(svc);
+       if (retval) {
+               /*
+                * Something went really wrong, let's warn userspace and then
+                * pull the plug and reset the whole greybus network.
+                * We need to do this outside of this workqueue as we will be
+                * tearing down the svc device itself.  So queue up
+                * yet-another-callback to do that.
+                */
+               dev_err(&svc->dev,
+                       "SVC ping has returned %d, something is wrong!!!\n",
+                       retval);
+
+               if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
+                       panic("SVC is not responding\n");
+               } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
+                       dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
+
+                       INIT_DELAYED_WORK(&reset_work, greybus_reset);
+                       schedule_delayed_work(&reset_work, HZ / 2);
+
+                       /*
+                        * Disable ourselves; we don't want to trip again unless
+                        * userspace wants us to.
+                        */
+                       watchdog->enabled = false;
+               }
+       }
+
+       /* resubmit our work to happen again, if we are still "alive" */
+       if (watchdog->enabled)
+               schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+}
+
+int gb_svc_watchdog_create(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+       int retval;
+
+       if (svc->watchdog)
+               return 0;
+
+       watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
+       if (!watchdog)
+               return -ENOMEM;
+
+       watchdog->enabled = false;
+       watchdog->svc = svc;
+       INIT_DELAYED_WORK(&watchdog->work, do_work);
+       svc->watchdog = watchdog;
+
+       watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
+       retval = register_pm_notifier(&watchdog->pm_notifier);
+       if (retval) {
+               dev_err(&svc->dev, "error registering pm notifier(%d)\n",
+                       retval);
+               goto svc_watchdog_create_err;
+       }
+
+       retval = gb_svc_watchdog_enable(svc);
+       if (retval) {
+               dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
+               unregister_pm_notifier(&watchdog->pm_notifier);
+               goto svc_watchdog_create_err;
+       }
+       return retval;
+
+svc_watchdog_create_err:
+       svc->watchdog = NULL;
+       kfree(watchdog);
+
+       return retval;
+}
+
+void gb_svc_watchdog_destroy(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog = svc->watchdog;
+
+       if (!watchdog)
+               return;
+
+       unregister_pm_notifier(&watchdog->pm_notifier);
+       gb_svc_watchdog_disable(svc);
+       svc->watchdog = NULL;
+       kfree(watchdog);
+}
+
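+/*
+ * A usage sketch (not from this driver): callers that need the link quiet
+ * for a while can pause the watchdog much like the PM notifier above does
+ * across system suspend:
+ *
+ *     gb_svc_watchdog_disable(svc);
+ *     ... critical section ...
+ *     gb_svc_watchdog_enable(svc);
+ */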
+bool gb_svc_watchdog_enabled(struct gb_svc *svc)
+{
+       if (!svc || !svc->watchdog)
+               return false;
+       return svc->watchdog->enabled;
+}
+
+int gb_svc_watchdog_enable(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+
+       if (!svc->watchdog)
+               return -ENODEV;
+
+       watchdog = svc->watchdog;
+       if (watchdog->enabled)
+               return 0;
+
+       watchdog->enabled = true;
+       schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
+       return 0;
+}
+
+int gb_svc_watchdog_disable(struct gb_svc *svc)
+{
+       struct gb_svc_watchdog *watchdog;
+
+       if (!svc->watchdog)
+               return -ENODEV;
+
+       watchdog = svc->watchdog;
+       if (!watchdog->enabled)
+               return 0;
+
+       watchdog->enabled = false;
+       cancel_delayed_work_sync(&watchdog->work);
+       return 0;
+}
index 4894c35149551d241d6754ea1edac8020f2fe1aa..d03c37e1e6e88ef1c127a34e0a6a93580c3c0457 100644 (file)
@@ -1,20 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-menuconfig GREYBUS
-       tristate "Greybus support"
-       depends on SYSFS
-       ---help---
-         This option enables the Greybus driver core.  Greybus is a
-         hardware protocol that was designed to provide UniPro with a
-         sane application layer.  It was originally designed for the
-         ARA project, a modular phone system, but has shown up in other
-         phones, and can be tunneled over other buses in order to
-         control hardware devices.
-
-         Say Y here to enable support for these types of drivers.
-
-         To compile this code as a module, choose M here: the module
-         will be called greybus.ko
-
 if GREYBUS
 
 config GREYBUS_ES2
index 2551ed16b7423b1d1706fc3a2aeefc4fb0b9fe41..d16853399c9abfa24d5ded90e64238b476e74494 100644 (file)
@@ -1,24 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
-# Greybus core
-greybus-y :=   core.o          \
-               debugfs.o       \
-               hd.o            \
-               manifest.o      \
-               module.o        \
-               interface.o     \
-               bundle.o        \
-               connection.o    \
-               control.o       \
-               svc.o           \
-               svc_watchdog.o  \
-               operation.o
-
-obj-$(CONFIG_GREYBUS)          += greybus.o
-
 # needed for trace events
 ccflags-y += -I$(src)
 
-
 # Greybus Host controller drivers
 gb-es2-y := es2.o
 
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
deleted file mode 100644 (file)
index 8466072..0000000
+++ /dev/null
@@ -1,252 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus bundles
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#include <linux/greybus.h>
-#include "greybus_trace.h"
-
-static ssize_t bundle_class_show(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-
-       return sprintf(buf, "0x%02x\n", bundle->class);
-}
-static DEVICE_ATTR_RO(bundle_class);
-
-static ssize_t bundle_id_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-
-       return sprintf(buf, "%u\n", bundle->id);
-}
-static DEVICE_ATTR_RO(bundle_id);
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-
-       if (!bundle->state)
-               return sprintf(buf, "\n");
-
-       return sprintf(buf, "%s\n", bundle->state);
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t size)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-
-       kfree(bundle->state);
-       bundle->state = kstrdup(buf, GFP_KERNEL);
-       if (!bundle->state)
-               return -ENOMEM;
-
-       /* Tell userspace that the file contents changed */
-       sysfs_notify(&bundle->dev.kobj, NULL, "state");
-
-       return size;
-}
-static DEVICE_ATTR_RW(state);
-
-static struct attribute *bundle_attrs[] = {
-       &dev_attr_bundle_class.attr,
-       &dev_attr_bundle_id.attr,
-       &dev_attr_state.attr,
-       NULL,
-};
-
-ATTRIBUTE_GROUPS(bundle);
-
-static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
-                                       u8 bundle_id)
-{
-       struct gb_bundle *bundle;
-
-       list_for_each_entry(bundle, &intf->bundles, links) {
-               if (bundle->id == bundle_id)
-                       return bundle;
-       }
-
-       return NULL;
-}
-
-static void gb_bundle_release(struct device *dev)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-
-       trace_gb_bundle_release(bundle);
-
-       kfree(bundle->state);
-       kfree(bundle->cport_desc);
-       kfree(bundle);
-}
-
-#ifdef CONFIG_PM
-static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
-{
-       struct gb_connection *connection;
-
-       list_for_each_entry(connection, &bundle->connections, bundle_links)
-               gb_connection_disable(connection);
-}
-
-static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
-{
-       struct gb_connection *connection;
-
-       list_for_each_entry(connection, &bundle->connections, bundle_links)
-               gb_connection_enable(connection);
-}
-
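-/*
- * If the bundle driver implements runtime PM, defer to its callbacks;
- * otherwise fall back to disabling (or re-enabling) all of the bundle's
- * connections around the control-protocol suspend/resume handshake.
- */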
-static int gb_bundle_suspend(struct device *dev)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-       const struct dev_pm_ops *pm = dev->driver->pm;
-       int ret;
-
-       if (pm && pm->runtime_suspend) {
-               ret = pm->runtime_suspend(&bundle->dev);
-               if (ret)
-                       return ret;
-       } else {
-               gb_bundle_disable_all_connections(bundle);
-       }
-
-       ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
-       if (ret) {
-               if (pm && pm->runtime_resume)
-                       ret = pm->runtime_resume(dev);
-               else
-                       gb_bundle_enable_all_connections(bundle);
-
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_bundle_resume(struct device *dev)
-{
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-       const struct dev_pm_ops *pm = dev->driver->pm;
-       int ret;
-
-       ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
-       if (ret)
-               return ret;
-
-       if (pm && pm->runtime_resume) {
-               ret = pm->runtime_resume(dev);
-               if (ret)
-                       return ret;
-       } else {
-               gb_bundle_enable_all_connections(bundle);
-       }
-
-       return 0;
-}
-
-static int gb_bundle_idle(struct device *dev)
-{
-       pm_runtime_mark_last_busy(dev);
-       pm_request_autosuspend(dev);
-
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops gb_bundle_pm_ops = {
-       SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
-};
-
-struct device_type greybus_bundle_type = {
-       .name =         "greybus_bundle",
-       .release =      gb_bundle_release,
-       .pm =           &gb_bundle_pm_ops,
-};
-
-/*
- * Create a gb_bundle structure to represent a discovered
- * bundle.  Returns a pointer to the new bundle, or a null
- * pointer on failure (invalid or duplicate bundle id, or
- * memory exhaustion).
- */
-struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
-                                  u8 class)
-{
-       struct gb_bundle *bundle;
-
-       if (bundle_id == BUNDLE_ID_NONE) {
-               dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
-               return NULL;
-       }
-
-       /*
-        * Reject any attempt to reuse a bundle id.  We initialize
-        * these serially, so there's no need to worry about keeping
-        * the interface bundle list locked here.
-        */
-       if (gb_bundle_find(intf, bundle_id)) {
-               dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
-               return NULL;
-       }
-
-       bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
-       if (!bundle)
-               return NULL;
-
-       bundle->intf = intf;
-       bundle->id = bundle_id;
-       bundle->class = class;
-       INIT_LIST_HEAD(&bundle->connections);
-
-       bundle->dev.parent = &intf->dev;
-       bundle->dev.bus = &greybus_bus_type;
-       bundle->dev.type = &greybus_bundle_type;
-       bundle->dev.groups = bundle_groups;
-       bundle->dev.dma_mask = intf->dev.dma_mask;
-       device_initialize(&bundle->dev);
-       dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);
-
-       list_add(&bundle->links, &intf->bundles);
-
-       trace_gb_bundle_create(bundle);
-
-       return bundle;
-}
-
-int gb_bundle_add(struct gb_bundle *bundle)
-{
-       int ret;
-
-       ret = device_add(&bundle->dev);
-       if (ret) {
-               dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
-               return ret;
-       }
-
-       trace_gb_bundle_add(bundle);
-
-       return 0;
-}
-
-/*
- * Tear down a previously set up bundle.
- */
-void gb_bundle_destroy(struct gb_bundle *bundle)
-{
-       trace_gb_bundle_destroy(bundle);
-
-       if (device_is_registered(&bundle->dev))
-               device_del(&bundle->dev);
-
-       list_del(&bundle->links);
-
-       put_device(&bundle->dev);
-}
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
deleted file mode 100644 (file)
index fc8f57f..0000000
+++ /dev/null
@@ -1,942 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus connections
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#include <linux/workqueue.h>
-#include <linux/greybus.h>
-
-#include "greybus_trace.h"
-
-#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT    1000
-
-static void gb_connection_kref_release(struct kref *kref);
-
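-/*
- * gb_connections_lock protects the host-device and bundle connection
- * lists; gb_connection_mutex serialises connection create and destroy.
- */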
-static DEFINE_SPINLOCK(gb_connections_lock);
-static DEFINE_MUTEX(gb_connection_mutex);
-
-/* Caller holds gb_connection_mutex. */
-static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
-{
-       struct gb_host_device *hd = intf->hd;
-       struct gb_connection *connection;
-
-       list_for_each_entry(connection, &hd->connections, hd_links) {
-               if (connection->intf == intf &&
-                   connection->intf_cport_id == cport_id)
-                       return true;
-       }
-
-       return false;
-}
-
-static void gb_connection_get(struct gb_connection *connection)
-{
-       kref_get(&connection->kref);
-
-       trace_gb_connection_get(connection);
-}
-
-static void gb_connection_put(struct gb_connection *connection)
-{
-       trace_gb_connection_put(connection);
-
-       kref_put(&connection->kref, gb_connection_kref_release);
-}
-
-/*
- * Returns a reference-counted pointer to the connection if found.
- */
-static struct gb_connection *
-gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
-{
-       struct gb_connection *connection;
-       unsigned long flags;
-
-       spin_lock_irqsave(&gb_connections_lock, flags);
-       list_for_each_entry(connection, &hd->connections, hd_links)
-               if (connection->hd_cport_id == cport_id) {
-                       gb_connection_get(connection);
-                       goto found;
-               }
-       connection = NULL;
-found:
-       spin_unlock_irqrestore(&gb_connections_lock, flags);
-
-       return connection;
-}
-
-/*
- * Callback from the host driver to let us know that data has been
- * received on a CPort.
- */
-void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
-                      u8 *data, size_t length)
-{
-       struct gb_connection *connection;
-
-       trace_gb_hd_in(hd);
-
-       connection = gb_connection_hd_find(hd, cport_id);
-       if (!connection) {
-               dev_err(&hd->dev,
-                       "nonexistent connection (%zu bytes dropped)\n", length);
-               return;
-       }
-       gb_connection_recv(connection, data, length);
-       gb_connection_put(connection);
-}
-EXPORT_SYMBOL_GPL(greybus_data_rcvd);
-
-static void gb_connection_kref_release(struct kref *kref)
-{
-       struct gb_connection *connection;
-
-       connection = container_of(kref, struct gb_connection, kref);
-
-       trace_gb_connection_release(connection);
-
-       kfree(connection);
-}
-
-static void gb_connection_init_name(struct gb_connection *connection)
-{
-       u16 hd_cport_id = connection->hd_cport_id;
-       u16 cport_id = 0;
-       u8 intf_id = 0;
-
-       if (connection->intf) {
-               intf_id = connection->intf->interface_id;
-               cport_id = connection->intf_cport_id;
-       }
-
-       snprintf(connection->name, sizeof(connection->name),
-                "%u/%u:%u", hd_cport_id, intf_id, cport_id);
-}
-
-/*
- * _gb_connection_create() - create a Greybus connection
- * @hd:                        host device of the connection
- * @hd_cport_id:       host-device cport id, or -1 for dynamic allocation
- * @intf:              remote interface, or NULL for static connections
- * @bundle:            remote-interface bundle (may be NULL)
- * @cport_id:          remote-interface cport id, or 0 for static connections
- * @handler:           request handler (may be NULL)
- * @flags:             connection flags
- *
- * Create a Greybus connection, representing the bidirectional link
- * between a CPort on a (local) Greybus host device and a CPort on
- * another Greybus interface.
- *
- * A connection also maintains the state of operations sent over the
- * connection.
- *
- * Serialised against concurrent create and destroy using the
- * gb_connection_mutex.
- *
- * Return: A pointer to the new connection if successful, or an ERR_PTR
- * otherwise.
- */
-static struct gb_connection *
-_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
-                     struct gb_interface *intf,
-                     struct gb_bundle *bundle, int cport_id,
-                     gb_request_handler_t handler,
-                     unsigned long flags)
-{
-       struct gb_connection *connection;
-       int ret;
-
-       mutex_lock(&gb_connection_mutex);
-
-       if (intf && gb_connection_cport_in_use(intf, cport_id)) {
-               dev_err(&intf->dev, "cport %u already in use\n", cport_id);
-               ret = -EBUSY;
-               goto err_unlock;
-       }
-
-       ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
-       if (ret < 0) {
-               dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
-               goto err_unlock;
-       }
-       hd_cport_id = ret;
-
-       connection = kzalloc(sizeof(*connection), GFP_KERNEL);
-       if (!connection) {
-               ret = -ENOMEM;
-               goto err_hd_cport_release;
-       }
-
-       connection->hd_cport_id = hd_cport_id;
-       connection->intf_cport_id = cport_id;
-       connection->hd = hd;
-       connection->intf = intf;
-       connection->bundle = bundle;
-       connection->handler = handler;
-       connection->flags = flags;
-       if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
-               connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
-       connection->state = GB_CONNECTION_STATE_DISABLED;
-
-       atomic_set(&connection->op_cycle, 0);
-       mutex_init(&connection->mutex);
-       spin_lock_init(&connection->lock);
-       INIT_LIST_HEAD(&connection->operations);
-
-       connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
-                                        dev_name(&hd->dev), hd_cport_id);
-       if (!connection->wq) {
-               ret = -ENOMEM;
-               goto err_free_connection;
-       }
-
-       kref_init(&connection->kref);
-
-       gb_connection_init_name(connection);
-
-       spin_lock_irq(&gb_connections_lock);
-       list_add(&connection->hd_links, &hd->connections);
-
-       if (bundle)
-               list_add(&connection->bundle_links, &bundle->connections);
-       else
-               INIT_LIST_HEAD(&connection->bundle_links);
-
-       spin_unlock_irq(&gb_connections_lock);
-
-       mutex_unlock(&gb_connection_mutex);
-
-       trace_gb_connection_create(connection);
-
-       return connection;
-
-err_free_connection:
-       kfree(connection);
-err_hd_cport_release:
-       gb_hd_cport_release(hd, hd_cport_id);
-err_unlock:
-       mutex_unlock(&gb_connection_mutex);
-
-       return ERR_PTR(ret);
-}
-
-struct gb_connection *
-gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
-                           gb_request_handler_t handler)
-{
-       return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
-                                    GB_CONNECTION_FLAG_HIGH_PRIO);
-}
-
-struct gb_connection *
-gb_connection_create_control(struct gb_interface *intf)
-{
-       return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
-                                    GB_CONNECTION_FLAG_CONTROL |
-                                    GB_CONNECTION_FLAG_HIGH_PRIO);
-}
-
-struct gb_connection *
-gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
-                    gb_request_handler_t handler)
-{
-       struct gb_interface *intf = bundle->intf;
-
-       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
-                                    handler, 0);
-}
-EXPORT_SYMBOL_GPL(gb_connection_create);
-
-struct gb_connection *
-gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
-                          gb_request_handler_t handler,
-                          unsigned long flags)
-{
-       struct gb_interface *intf = bundle->intf;
-
-       if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
-               flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
-
-       return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
-                                    handler, flags);
-}
-EXPORT_SYMBOL_GPL(gb_connection_create_flags);
-
-struct gb_connection *
-gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
-                              unsigned long flags)
-{
-       flags |= GB_CONNECTION_FLAG_OFFLOADED;
-
-       return gb_connection_create_flags(bundle, cport_id, NULL, flags);
-}
-EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
-
-static int gb_connection_hd_cport_enable(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->cport_enable)
-               return 0;
-
-       ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
-                                      connection->flags);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void gb_connection_hd_cport_disable(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->cport_disable)
-               return;
-
-       ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
-                       connection->name, ret);
-       }
-}
-
-static int gb_connection_hd_cport_connected(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->cport_connected)
-               return 0;
-
-       ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_connection_hd_cport_flush(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->cport_flush)
-               return 0;
-
-       ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       size_t peer_space;
-       int ret;
-
-       if (!hd->driver->cport_quiesce)
-               return 0;
-
-       peer_space = sizeof(struct gb_operation_msg_hdr) +
-                       sizeof(struct gb_cport_shutdown_request);
-
-       if (connection->mode_switch)
-               peer_space += sizeof(struct gb_operation_msg_hdr);
-
-       ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
-                                       peer_space,
-                                       GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_connection_hd_cport_clear(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->cport_clear)
-               return 0;
-
-       ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-/*
- * Request the SVC to create a connection from AP's cport to interface's
- * cport.
- */
-static int
-gb_connection_svc_connection_create(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       struct gb_interface *intf;
-       u8 cport_flags;
-       int ret;
-
-       if (gb_connection_is_static(connection))
-               return 0;
-
-       intf = connection->intf;
-
-       /*
-        * Enable either E2EFC (end-to-end flow control) or CSD, unless no
-        * flow control is requested.  The _N flags request that a feature
-        * be disabled.
-        */
-       cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
-       if (gb_connection_flow_control_disabled(connection)) {
-               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
-       } else if (gb_connection_e2efc_enabled(connection)) {
-               cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
-                               GB_SVC_CPORT_FLAG_E2EFC;
-       }
-
-       ret = gb_svc_connection_create(hd->svc,
-                                      hd->svc->ap_intf_id,
-                                      connection->hd_cport_id,
-                                      intf->interface_id,
-                                      connection->intf_cport_id,
-                                      cport_flags);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: failed to create svc connection: %d\n",
-                       connection->name, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void
-gb_connection_svc_connection_destroy(struct gb_connection *connection)
-{
-       if (gb_connection_is_static(connection))
-               return;
-
-       gb_svc_connection_destroy(connection->hd->svc,
-                                 connection->hd->svc->ap_intf_id,
-                                 connection->hd_cport_id,
-                                 connection->intf->interface_id,
-                                 connection->intf_cport_id);
-}
-
-/* Inform Interface about active CPorts */
-static int gb_connection_control_connected(struct gb_connection *connection)
-{
-       struct gb_control *control;
-       u16 cport_id = connection->intf_cport_id;
-       int ret;
-
-       if (gb_connection_is_static(connection))
-               return 0;
-
-       if (gb_connection_is_control(connection))
-               return 0;
-
-       control = connection->intf->control;
-
-       ret = gb_control_connected_operation(control, cport_id);
-       if (ret) {
-               dev_err(&connection->bundle->dev,
-                       "failed to connect cport: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void
-gb_connection_control_disconnecting(struct gb_connection *connection)
-{
-       struct gb_control *control;
-       u16 cport_id = connection->intf_cport_id;
-       int ret;
-
-       if (gb_connection_is_static(connection))
-               return;
-
-       control = connection->intf->control;
-
-       ret = gb_control_disconnecting_operation(control, cport_id);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: failed to send disconnecting: %d\n",
-                       connection->name, ret);
-       }
-}
-
-static void
-gb_connection_control_disconnected(struct gb_connection *connection)
-{
-       struct gb_control *control;
-       u16 cport_id = connection->intf_cport_id;
-       int ret;
-
-       if (gb_connection_is_static(connection))
-               return;
-
-       control = connection->intf->control;
-
-       if (gb_connection_is_control(connection)) {
-               if (connection->mode_switch) {
-                       ret = gb_control_mode_switch_operation(control);
-                       if (ret) {
-                               /*
-                                * Allow mode switch to time out waiting for
-                                * mailbox event.
-                                */
-                               return;
-                       }
-               }
-
-               return;
-       }
-
-       ret = gb_control_disconnected_operation(control, cport_id);
-       if (ret) {
-               dev_warn(&connection->bundle->dev,
-                        "failed to disconnect cport: %d\n", ret);
-       }
-}
-
-static int gb_connection_shutdown_operation(struct gb_connection *connection,
-                                           u8 phase)
-{
-       struct gb_cport_shutdown_request *req;
-       struct gb_operation *operation;
-       int ret;
-
-       operation = gb_operation_create_core(connection,
-                                            GB_REQUEST_TYPE_CPORT_SHUTDOWN,
-                                            sizeof(*req), 0, 0,
-                                            GFP_KERNEL);
-       if (!operation)
-               return -ENOMEM;
-
-       req = operation->request->payload;
-       req->phase = phase;
-
-       ret = gb_operation_request_send_sync(operation);
-
-       gb_operation_put(operation);
-
-       return ret;
-}
-
-static int gb_connection_cport_shutdown(struct gb_connection *connection,
-                                       u8 phase)
-{
-       struct gb_host_device *hd = connection->hd;
-       const struct gb_hd_driver *drv = hd->driver;
-       int ret;
-
-       if (gb_connection_is_static(connection))
-               return 0;
-
-       if (gb_connection_is_offloaded(connection)) {
-               if (!drv->cport_shutdown)
-                       return 0;
-
-               ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
-                                         GB_OPERATION_TIMEOUT_DEFAULT);
-       } else {
-               ret = gb_connection_shutdown_operation(connection, phase);
-       }
-
-       if (ret) {
-               dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
-                       connection->name, phase, ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int
-gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
-{
-       return gb_connection_cport_shutdown(connection, 1);
-}
-
-static int
-gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
-{
-       return gb_connection_cport_shutdown(connection, 2);
-}
-
-/*
- * Cancel all active operations on a connection.
- *
- * Locking: Called with connection lock held and state set to DISABLED or
- * DISCONNECTING.
- */
-static void gb_connection_cancel_operations(struct gb_connection *connection,
-                                           int errno)
-       __must_hold(&connection->lock)
-{
-       struct gb_operation *operation;
-
-       while (!list_empty(&connection->operations)) {
-               operation = list_last_entry(&connection->operations,
-                                           struct gb_operation, links);
-               gb_operation_get(operation);
-               spin_unlock_irq(&connection->lock);
-
-               if (gb_operation_is_incoming(operation))
-                       gb_operation_cancel_incoming(operation, errno);
-               else
-                       gb_operation_cancel(operation, errno);
-
-               gb_operation_put(operation);
-
-               spin_lock_irq(&connection->lock);
-       }
-}
-
-/*
- * Cancel all active incoming operations on a connection.
- *
- * Locking: Called with connection lock held and state set to ENABLED_TX.
- */
-static void
-gb_connection_flush_incoming_operations(struct gb_connection *connection,
-                                       int errno)
-       __must_hold(&connection->lock)
-{
-       struct gb_operation *operation;
-       bool incoming;
-
-       while (!list_empty(&connection->operations)) {
-               incoming = false;
-               list_for_each_entry(operation, &connection->operations,
-                                   links) {
-                       if (gb_operation_is_incoming(operation)) {
-                               gb_operation_get(operation);
-                               incoming = true;
-                               break;
-                       }
-               }
-
-               if (!incoming)
-                       break;
-
-               spin_unlock_irq(&connection->lock);
-
-               /* FIXME: flush, not cancel? */
-               gb_operation_cancel_incoming(operation, errno);
-               gb_operation_put(operation);
-
-               spin_lock_irq(&connection->lock);
-       }
-}
-
-/*
- * _gb_connection_enable() - enable a connection
- * @connection:                connection to enable
- * @rx:                        whether to enable incoming requests
- *
- * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
- * ENABLED_TX->ENABLED state transitions.
- *
- * Locking: Caller holds connection->mutex.
- */
-static int _gb_connection_enable(struct gb_connection *connection, bool rx)
-{
-       int ret;
-
-       /* Handle ENABLED_TX -> ENABLED transitions. */
-       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
-               if (!(connection->handler && rx))
-                       return 0;
-
-               spin_lock_irq(&connection->lock);
-               connection->state = GB_CONNECTION_STATE_ENABLED;
-               spin_unlock_irq(&connection->lock);
-
-               return 0;
-       }
-
-       ret = gb_connection_hd_cport_enable(connection);
-       if (ret)
-               return ret;
-
-       ret = gb_connection_svc_connection_create(connection);
-       if (ret)
-               goto err_hd_cport_clear;
-
-       ret = gb_connection_hd_cport_connected(connection);
-       if (ret)
-               goto err_svc_connection_destroy;
-
-       spin_lock_irq(&connection->lock);
-       if (connection->handler && rx)
-               connection->state = GB_CONNECTION_STATE_ENABLED;
-       else
-               connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-       spin_unlock_irq(&connection->lock);
-
-       ret = gb_connection_control_connected(connection);
-       if (ret)
-               goto err_control_disconnecting;
-
-       return 0;
-
-err_control_disconnecting:
-       spin_lock_irq(&connection->lock);
-       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN);
-       spin_unlock_irq(&connection->lock);
-
-       /* Transmit queue should already be empty. */
-       gb_connection_hd_cport_flush(connection);
-
-       gb_connection_control_disconnecting(connection);
-       gb_connection_cport_shutdown_phase_1(connection);
-       gb_connection_hd_cport_quiesce(connection);
-       gb_connection_cport_shutdown_phase_2(connection);
-       gb_connection_control_disconnected(connection);
-       connection->state = GB_CONNECTION_STATE_DISABLED;
-err_svc_connection_destroy:
-       gb_connection_svc_connection_destroy(connection);
-err_hd_cport_clear:
-       gb_connection_hd_cport_clear(connection);
-
-       gb_connection_hd_cport_disable(connection);
-
-       return ret;
-}
-
-int gb_connection_enable(struct gb_connection *connection)
-{
-       int ret = 0;
-
-       mutex_lock(&connection->mutex);
-
-       if (connection->state == GB_CONNECTION_STATE_ENABLED)
-               goto out_unlock;
-
-       ret = _gb_connection_enable(connection, true);
-       if (!ret)
-               trace_gb_connection_enable(connection);
-
-out_unlock:
-       mutex_unlock(&connection->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_connection_enable);
-
-int gb_connection_enable_tx(struct gb_connection *connection)
-{
-       int ret = 0;
-
-       mutex_lock(&connection->mutex);
-
-       if (connection->state == GB_CONNECTION_STATE_ENABLED) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
-               goto out_unlock;
-
-       ret = _gb_connection_enable(connection, false);
-       if (!ret)
-               trace_gb_connection_enable(connection);
-
-out_unlock:
-       mutex_unlock(&connection->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
-
-void gb_connection_disable_rx(struct gb_connection *connection)
-{
-       mutex_lock(&connection->mutex);
-
-       spin_lock_irq(&connection->lock);
-       if (connection->state != GB_CONNECTION_STATE_ENABLED) {
-               spin_unlock_irq(&connection->lock);
-               goto out_unlock;
-       }
-       connection->state = GB_CONNECTION_STATE_ENABLED_TX;
-       gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
-       spin_unlock_irq(&connection->lock);
-
-       trace_gb_connection_disable(connection);
-
-out_unlock:
-       mutex_unlock(&connection->mutex);
-}
-EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
-
-void gb_connection_mode_switch_prepare(struct gb_connection *connection)
-{
-       connection->mode_switch = true;
-}
-
-void gb_connection_mode_switch_complete(struct gb_connection *connection)
-{
-       gb_connection_svc_connection_destroy(connection);
-       gb_connection_hd_cport_clear(connection);
-
-       gb_connection_hd_cport_disable(connection);
-
-       connection->mode_switch = false;
-}
-
-void gb_connection_disable(struct gb_connection *connection)
-{
-       mutex_lock(&connection->mutex);
-
-       if (connection->state == GB_CONNECTION_STATE_DISABLED)
-               goto out_unlock;
-
-       trace_gb_connection_disable(connection);
-
-       spin_lock_irq(&connection->lock);
-       connection->state = GB_CONNECTION_STATE_DISCONNECTING;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN);
-       spin_unlock_irq(&connection->lock);
-
-       gb_connection_hd_cport_flush(connection);
-
-       gb_connection_control_disconnecting(connection);
-       gb_connection_cport_shutdown_phase_1(connection);
-       gb_connection_hd_cport_quiesce(connection);
-       gb_connection_cport_shutdown_phase_2(connection);
-       gb_connection_control_disconnected(connection);
-
-       connection->state = GB_CONNECTION_STATE_DISABLED;
-
-       /* control-connection tear down is deferred when mode switching */
-       if (!connection->mode_switch) {
-               gb_connection_svc_connection_destroy(connection);
-               gb_connection_hd_cport_clear(connection);
-
-               gb_connection_hd_cport_disable(connection);
-       }
-
-out_unlock:
-       mutex_unlock(&connection->mutex);
-}
-EXPORT_SYMBOL_GPL(gb_connection_disable);
-
-/* Disable a connection without communicating with the remote end. */
-void gb_connection_disable_forced(struct gb_connection *connection)
-{
-       mutex_lock(&connection->mutex);
-
-       if (connection->state == GB_CONNECTION_STATE_DISABLED)
-               goto out_unlock;
-
-       trace_gb_connection_disable(connection);
-
-       spin_lock_irq(&connection->lock);
-       connection->state = GB_CONNECTION_STATE_DISABLED;
-       gb_connection_cancel_operations(connection, -ESHUTDOWN);
-       spin_unlock_irq(&connection->lock);
-
-       gb_connection_hd_cport_flush(connection);
-
-       gb_connection_svc_connection_destroy(connection);
-       gb_connection_hd_cport_clear(connection);
-
-       gb_connection_hd_cport_disable(connection);
-out_unlock:
-       mutex_unlock(&connection->mutex);
-}
-EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
-
-/* Caller must have disabled the connection before destroying it. */
-void gb_connection_destroy(struct gb_connection *connection)
-{
-       if (!connection)
-               return;
-
-       if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
-               gb_connection_disable(connection);
-
-       mutex_lock(&gb_connection_mutex);
-
-       spin_lock_irq(&gb_connections_lock);
-       list_del(&connection->bundle_links);
-       list_del(&connection->hd_links);
-       spin_unlock_irq(&gb_connections_lock);
-
-       destroy_workqueue(connection->wq);
-
-       gb_hd_cport_release(connection->hd, connection->hd_cport_id);
-       connection->hd_cport_id = CPORT_ID_BAD;
-
-       mutex_unlock(&gb_connection_mutex);
-
-       gb_connection_put(connection);
-}
-EXPORT_SYMBOL_GPL(gb_connection_destroy);
-
-void gb_connection_latency_tag_enable(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->latency_tag_enable)
-               return;
-
-       ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: failed to enable latency tag: %d\n",
-                       connection->name, ret);
-       }
-}
-EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
-
-void gb_connection_latency_tag_disable(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-       int ret;
-
-       if (!hd->driver->latency_tag_disable)
-               return;
-
-       ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: failed to disable latency tag: %d\n",
-                       connection->name, ret);
-       }
-}
-EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
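
Taken together, the helpers exported above form the connection life cycle a bundle driver drives: create, enable, use, disable, destroy. Below is a minimal sketch of that flow, assuming a bundle with a single CPort; the example_* names are invented for illustration, while gb_connection_create(), gb_connection_enable() and friends are the exported API shown in this file.

/*
 * Hedged sketch of the connection life cycle from a bundle driver;
 * error handling is abbreviated and the example_* names are invented.
 */
#include <linux/greybus.h>

static int example_request_handler(struct gb_operation *op)
{
        /* Handle incoming requests on the connection here. */
        return 0;
}

static int example_use_connection(struct gb_bundle *bundle)
{
        struct gb_connection *connection;
        int ret;

        connection = gb_connection_create(bundle,
                                le16_to_cpu(bundle->cport_desc[0].id),
                                example_request_handler);
        if (IS_ERR(connection))
                return PTR_ERR(connection);

        ret = gb_connection_enable(connection);
        if (ret)
                goto err_destroy;

        /* ... exchange operations over the connection ... */

        gb_connection_disable(connection);
err_destroy:
        gb_connection_destroy(connection);

        return ret;
}
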
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
deleted file mode 100644 (file)
index 359a258..0000000
+++ /dev/null
@@ -1,584 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus CPort control protocol.
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/greybus.h>
-
-/* Highest control-protocol version supported */
-#define GB_CONTROL_VERSION_MAJOR       0
-#define GB_CONTROL_VERSION_MINOR       1
-
-static int gb_control_get_version(struct gb_control *control)
-{
-       struct gb_interface *intf = control->connection->intf;
-       struct gb_control_version_request request;
-       struct gb_control_version_response response;
-       int ret;
-
-       request.major = GB_CONTROL_VERSION_MAJOR;
-       request.minor = GB_CONTROL_VERSION_MINOR;
-
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_VERSION,
-                               &request, sizeof(request), &response,
-                               sizeof(response));
-       if (ret) {
-               dev_err(&intf->dev,
-                       "failed to get control-protocol version: %d\n",
-                       ret);
-               return ret;
-       }
-
-       if (response.major > request.major) {
-               dev_err(&intf->dev,
-                       "unsupported major control-protocol version (%u > %u)\n",
-                       response.major, request.major);
-               return -ENOTSUPP;
-       }
-
-       control->protocol_major = response.major;
-       control->protocol_minor = response.minor;
-
-       dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
-               response.minor);
-
-       return 0;
-}
-
-static int gb_control_get_bundle_version(struct gb_control *control,
-                                        struct gb_bundle *bundle)
-{
-       struct gb_interface *intf = control->connection->intf;
-       struct gb_control_bundle_version_request request;
-       struct gb_control_bundle_version_response response;
-       int ret;
-
-       request.bundle_id = bundle->id;
-
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_BUNDLE_VERSION,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&intf->dev,
-                       "failed to get bundle %u class version: %d\n",
-                       bundle->id, ret);
-               return ret;
-       }
-
-       bundle->class_major = response.major;
-       bundle->class_minor = response.minor;
-
-       dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
-               response.major, response.minor);
-
-       return 0;
-}
-
-int gb_control_get_bundle_versions(struct gb_control *control)
-{
-       struct gb_interface *intf = control->connection->intf;
-       struct gb_bundle *bundle;
-       int ret;
-
-       if (!control->has_bundle_version)
-               return 0;
-
-       list_for_each_entry(bundle, &intf->bundles, links) {
-               ret = gb_control_get_bundle_version(control, bundle);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-/* Get Manifest's size from the interface */
-int gb_control_get_manifest_size_operation(struct gb_interface *intf)
-{
-       struct gb_control_get_manifest_size_response response;
-       struct gb_connection *connection = intf->control->connection;
-       int ret;
-
-       ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
-                               NULL, 0, &response, sizeof(response));
-       if (ret) {
-               dev_err(&connection->intf->dev,
-                       "failed to get manifest size: %d\n", ret);
-               return ret;
-       }
-
-       return le16_to_cpu(response.size);
-}
-
-/* Reads Manifest from the interface */
-int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
-                                     size_t size)
-{
-       struct gb_connection *connection = intf->control->connection;
-
-       return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
-                               NULL, 0, manifest, size);
-}
-
-int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
-{
-       struct gb_control_connected_request request;
-
-       request.cport_id = cpu_to_le16(cport_id);
-       return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
-                                &request, sizeof(request), NULL, 0);
-}
-
-int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
-{
-       struct gb_control_disconnected_request request;
-
-       request.cport_id = cpu_to_le16(cport_id);
-       return gb_operation_sync(control->connection,
-                                GB_CONTROL_TYPE_DISCONNECTED, &request,
-                                sizeof(request), NULL, 0);
-}
-
-int gb_control_disconnecting_operation(struct gb_control *control,
-                                      u16 cport_id)
-{
-       struct gb_control_disconnecting_request *request;
-       struct gb_operation *operation;
-       int ret;
-
-       operation = gb_operation_create_core(control->connection,
-                                            GB_CONTROL_TYPE_DISCONNECTING,
-                                            sizeof(*request), 0, 0,
-                                            GFP_KERNEL);
-       if (!operation)
-               return -ENOMEM;
-
-       request = operation->request->payload;
-       request->cport_id = cpu_to_le16(cport_id);
-
-       ret = gb_operation_request_send_sync(operation);
-       if (ret) {
-               dev_err(&control->dev, "failed to send disconnecting: %d\n",
-                       ret);
-       }
-
-       gb_operation_put(operation);
-
-       return ret;
-}
-
-int gb_control_mode_switch_operation(struct gb_control *control)
-{
-       struct gb_operation *operation;
-       int ret;
-
-       operation = gb_operation_create_core(control->connection,
-                                            GB_CONTROL_TYPE_MODE_SWITCH,
-                                            0, 0,
-                                            GB_OPERATION_FLAG_UNIDIRECTIONAL,
-                                            GFP_KERNEL);
-       if (!operation)
-               return -ENOMEM;
-
-       ret = gb_operation_request_send_sync(operation);
-       if (ret)
-               dev_err(&control->dev, "failed to send mode switch: %d\n", ret);
-
-       gb_operation_put(operation);
-
-       return ret;
-}
-
-static int gb_control_bundle_pm_status_map(u8 status)
-{
-       switch (status) {
-       case GB_CONTROL_BUNDLE_PM_INVAL:
-               return -EINVAL;
-       case GB_CONTROL_BUNDLE_PM_BUSY:
-               return -EBUSY;
-       case GB_CONTROL_BUNDLE_PM_NA:
-               return -ENOMSG;
-       case GB_CONTROL_BUNDLE_PM_FAIL:
-       default:
-               return -EREMOTEIO;
-       }
-}
-
-int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
-{
-       struct gb_control_bundle_pm_request request;
-       struct gb_control_bundle_pm_response response;
-       int ret;
-
-       request.bundle_id = bundle_id;
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
-                               sizeof(request), &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
-                       bundle_id, ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
-               dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
-                       bundle_id, response.status);
-               return gb_control_bundle_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
-{
-       struct gb_control_bundle_pm_request request;
-       struct gb_control_bundle_pm_response response;
-       int ret;
-
-       request.bundle_id = bundle_id;
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
-                               sizeof(request), &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
-                       bundle_id, ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
-               dev_err(&control->dev, "failed to resume bundle %u: %d\n",
-                       bundle_id, response.status);
-               return gb_control_bundle_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
-{
-       struct gb_control_bundle_pm_request request;
-       struct gb_control_bundle_pm_response response;
-       int ret;
-
-       request.bundle_id = bundle_id;
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
-                               sizeof(request), &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev,
-                       "failed to send bundle %u deactivate: %d\n", bundle_id,
-                       ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
-               dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
-                       bundle_id, response.status);
-               return gb_control_bundle_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
-{
-       struct gb_control_bundle_pm_request request;
-       struct gb_control_bundle_pm_response response;
-       int ret;
-
-       if (!control->has_bundle_activate)
-               return 0;
-
-       request.bundle_id = bundle_id;
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
-                               sizeof(request), &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev,
-                       "failed to send bundle %u activate: %d\n", bundle_id,
-                       ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
-               dev_err(&control->dev, "failed to activate bundle %u: %d\n",
-                       bundle_id, response.status);
-               return gb_control_bundle_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-static int gb_control_interface_pm_status_map(u8 status)
-{
-       switch (status) {
-       case GB_CONTROL_INTF_PM_BUSY:
-               return -EBUSY;
-       case GB_CONTROL_INTF_PM_NA:
-               return -ENOMSG;
-       default:
-               return -EREMOTEIO;
-       }
-}
-
-int gb_control_interface_suspend_prepare(struct gb_control *control)
-{
-       struct gb_control_intf_pm_response response;
-       int ret;
-
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev,
-                       "failed to send interface suspend prepare: %d\n", ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_INTF_PM_OK) {
-               dev_err(&control->dev, "interface error while preparing suspend: %d\n",
-                       response.status);
-               return gb_control_interface_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-int gb_control_interface_deactivate_prepare(struct gb_control *control)
-{
-       struct gb_control_intf_pm_response response;
-       int ret;
-
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
-                               0, &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
-                       ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_INTF_PM_OK) {
-               dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
-                       response.status);
-               return gb_control_interface_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-int gb_control_interface_hibernate_abort(struct gb_control *control)
-{
-       struct gb_control_intf_pm_response response;
-       int ret;
-
-       ret = gb_operation_sync(control->connection,
-                               GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&control->dev,
-                       "failed to send interface aborting hibernate: %d\n",
-                       ret);
-               return ret;
-       }
-
-       if (response.status != GB_CONTROL_INTF_PM_OK) {
-               dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
-                       response.status);
-               return gb_control_interface_pm_status_map(response.status);
-       }
-
-       return 0;
-}
-
-static ssize_t vendor_string_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       struct gb_control *control = to_gb_control(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
-}
-static DEVICE_ATTR_RO(vendor_string);
-
-static ssize_t product_string_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct gb_control *control = to_gb_control(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
-}
-static DEVICE_ATTR_RO(product_string);
-
-static struct attribute *control_attrs[] = {
-       &dev_attr_vendor_string.attr,
-       &dev_attr_product_string.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(control);
-
-static void gb_control_release(struct device *dev)
-{
-       struct gb_control *control = to_gb_control(dev);
-
-       gb_connection_destroy(control->connection);
-
-       kfree(control->vendor_string);
-       kfree(control->product_string);
-
-       kfree(control);
-}
-
-struct device_type greybus_control_type = {
-       .name =         "greybus_control",
-       .release =      gb_control_release,
-};
-
-struct gb_control *gb_control_create(struct gb_interface *intf)
-{
-       struct gb_connection *connection;
-       struct gb_control *control;
-
-       control = kzalloc(sizeof(*control), GFP_KERNEL);
-       if (!control)
-               return ERR_PTR(-ENOMEM);
-
-       control->intf = intf;
-
-       connection = gb_connection_create_control(intf);
-       if (IS_ERR(connection)) {
-               dev_err(&intf->dev,
-                       "failed to create control connection: %ld\n",
-                       PTR_ERR(connection));
-               kfree(control);
-               return ERR_CAST(connection);
-       }
-
-       control->connection = connection;
-
-       control->dev.parent = &intf->dev;
-       control->dev.bus = &greybus_bus_type;
-       control->dev.type = &greybus_control_type;
-       control->dev.groups = control_groups;
-       control->dev.dma_mask = intf->dev.dma_mask;
-       device_initialize(&control->dev);
-       dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));
-
-       gb_connection_set_data(control->connection, control);
-
-       return control;
-}
-
-int gb_control_enable(struct gb_control *control)
-{
-       int ret;
-
-       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
-
-       ret = gb_connection_enable_tx(control->connection);
-       if (ret) {
-               dev_err(&control->connection->intf->dev,
-                       "failed to enable control connection: %d\n",
-                       ret);
-               return ret;
-       }
-
-       ret = gb_control_get_version(control);
-       if (ret)
-               goto err_disable_connection;
-
-       if (control->protocol_major > 0 || control->protocol_minor > 1)
-               control->has_bundle_version = true;
-
-       /* FIXME: use protocol version instead */
-       if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
-               control->has_bundle_activate = true;
-
-       return 0;
-
-err_disable_connection:
-       gb_connection_disable(control->connection);
-
-       return ret;
-}
-
-void gb_control_disable(struct gb_control *control)
-{
-       dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
-
-       if (control->intf->disconnected)
-               gb_connection_disable_forced(control->connection);
-       else
-               gb_connection_disable(control->connection);
-}
-
-int gb_control_suspend(struct gb_control *control)
-{
-       gb_connection_disable(control->connection);
-
-       return 0;
-}
-
-int gb_control_resume(struct gb_control *control)
-{
-       int ret;
-
-       ret = gb_connection_enable_tx(control->connection);
-       if (ret) {
-               dev_err(&control->connection->intf->dev,
-                       "failed to enable control connection: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-int gb_control_add(struct gb_control *control)
-{
-       int ret;
-
-       ret = device_add(&control->dev);
-       if (ret) {
-               dev_err(&control->dev,
-                       "failed to register control device: %d\n",
-                       ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-void gb_control_del(struct gb_control *control)
-{
-       if (device_is_registered(&control->dev))
-               device_del(&control->dev);
-}
-
-struct gb_control *gb_control_get(struct gb_control *control)
-{
-       get_device(&control->dev);
-
-       return control;
-}
-
-void gb_control_put(struct gb_control *control)
-{
-       put_device(&control->dev);
-}
-
-void gb_control_mode_switch_prepare(struct gb_control *control)
-{
-       gb_connection_mode_switch_prepare(control->connection);
-}
-
-void gb_control_mode_switch_complete(struct gb_control *control)
-{
-       gb_connection_mode_switch_complete(control->connection);
-}
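
Every control operation above follows the same synchronous request/response pattern built on gb_operation_sync(). The sketch below shows that pattern for a made-up protocol; GB_EXAMPLE_TYPE_ECHO and the two structs are hypothetical stand-ins, not part of the control protocol.

/*
 * Hedged sketch of the synchronous operation pattern used throughout
 * control.c; the operation type and structs are hypothetical.
 */
#include <linux/greybus.h>

#define GB_EXAMPLE_TYPE_ECHO    0x7f    /* hypothetical operation type */

struct gb_example_echo_request {
        __le32 value;
} __packed;

struct gb_example_echo_response {
        __le32 value;
} __packed;

static int gb_example_echo(struct gb_connection *connection, u32 value)
{
        struct gb_example_echo_request request;
        struct gb_example_echo_response response;
        int ret;

        request.value = cpu_to_le32(value);

        ret = gb_operation_sync(connection, GB_EXAMPLE_TYPE_ECHO,
                                &request, sizeof(request),
                                &response, sizeof(response));
        if (ret)
                return ret;

        /* Like the control handlers above, validate the response payload. */
        return le32_to_cpu(response.value) == value ? 0 : -EIO;
}
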
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
deleted file mode 100644 (file)
index e546c64..0000000
+++ /dev/null
@@ -1,349 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus "Core"
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define CREATE_TRACE_POINTS
-#include <linux/greybus.h>
-#include "greybus_trace.h"
-
-#define GB_BUNDLE_AUTOSUSPEND_MS       3000
-
-/* Allow greybus to be disabled at boot if needed */
-static bool nogreybus;
-#ifdef MODULE
-module_param(nogreybus, bool, 0444);
-#else
-core_param(nogreybus, nogreybus, bool, 0444);
-#endif
-int greybus_disabled(void)
-{
-       return nogreybus;
-}
-EXPORT_SYMBOL_GPL(greybus_disabled);
-
-static bool greybus_match_one_id(struct gb_bundle *bundle,
-                                const struct greybus_bundle_id *id)
-{
-       if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
-           (id->vendor != bundle->intf->vendor_id))
-               return false;
-
-       if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
-           (id->product != bundle->intf->product_id))
-               return false;
-
-       if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
-           (id->class != bundle->class))
-               return false;
-
-       return true;
-}
-
-static const struct greybus_bundle_id *
-greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
-{
-       if (!id)
-               return NULL;
-
-       for (; id->vendor || id->product || id->class || id->driver_info;
-                                                                       id++) {
-               if (greybus_match_one_id(bundle, id))
-                       return id;
-       }
-
-       return NULL;
-}
-
-static int greybus_match_device(struct device *dev, struct device_driver *drv)
-{
-       struct greybus_driver *driver = to_greybus_driver(drv);
-       struct gb_bundle *bundle;
-       const struct greybus_bundle_id *id;
-
-       if (!is_gb_bundle(dev))
-               return 0;
-
-       bundle = to_gb_bundle(dev);
-
-       id = greybus_match_id(bundle, driver->id_table);
-       if (id)
-               return 1;
-       /* FIXME - Dynamic ids? */
-       return 0;
-}
-
-static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct gb_host_device *hd;
-       struct gb_module *module = NULL;
-       struct gb_interface *intf = NULL;
-       struct gb_control *control = NULL;
-       struct gb_bundle *bundle = NULL;
-       struct gb_svc *svc = NULL;
-
-       if (is_gb_host_device(dev)) {
-               hd = to_gb_host_device(dev);
-       } else if (is_gb_module(dev)) {
-               module = to_gb_module(dev);
-               hd = module->hd;
-       } else if (is_gb_interface(dev)) {
-               intf = to_gb_interface(dev);
-               module = intf->module;
-               hd = intf->hd;
-       } else if (is_gb_control(dev)) {
-               control = to_gb_control(dev);
-               intf = control->intf;
-               module = intf->module;
-               hd = intf->hd;
-       } else if (is_gb_bundle(dev)) {
-               bundle = to_gb_bundle(dev);
-               intf = bundle->intf;
-               module = intf->module;
-               hd = intf->hd;
-       } else if (is_gb_svc(dev)) {
-               svc = to_gb_svc(dev);
-               hd = svc->hd;
-       } else {
-               dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
-               return -EINVAL;
-       }
-
-       if (add_uevent_var(env, "BUS=%u", hd->bus_id))
-               return -ENOMEM;
-
-       if (module) {
-               if (add_uevent_var(env, "MODULE=%u", module->module_id))
-                       return -ENOMEM;
-       }
-
-       if (intf) {
-               if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
-                       return -ENOMEM;
-               if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
-                                  intf->vendor_id, intf->product_id))
-                       return -ENOMEM;
-       }
-
-       if (bundle) {
-               // FIXME
-               // add a uevent that can "load" a bundle type
-               // This is what we need to bind a driver to so use the info
-               // in gmod here as well
-
-               if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
-                       return -ENOMEM;
-               if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static void greybus_shutdown(struct device *dev)
-{
-       if (is_gb_host_device(dev)) {
-               struct gb_host_device *hd;
-
-               hd = to_gb_host_device(dev);
-               gb_hd_shutdown(hd);
-       }
-}
-
-struct bus_type greybus_bus_type = {
-       .name =         "greybus",
-       .match =        greybus_match_device,
-       .uevent =       greybus_uevent,
-       .shutdown =     greybus_shutdown,
-};
-
-static int greybus_probe(struct device *dev)
-{
-       struct greybus_driver *driver = to_greybus_driver(dev->driver);
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-       const struct greybus_bundle_id *id;
-       int retval;
-
-       /* match id */
-       id = greybus_match_id(bundle, driver->id_table);
-       if (!id)
-               return -ENODEV;
-
-       retval = pm_runtime_get_sync(&bundle->intf->dev);
-       if (retval < 0) {
-               pm_runtime_put_noidle(&bundle->intf->dev);
-               return retval;
-       }
-
-       retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
-       if (retval) {
-               pm_runtime_put(&bundle->intf->dev);
-               return retval;
-       }
-
-       /*
-        * Unbound bundle devices are always deactivated. During probe,
-        * runtime PM is enabled and marked active and the usage count is
-        * incremented. If the driver supports runtime PM, it should call
-        * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
-        * in its remove routine (see the sketch after this function).
-        */
-       pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
-       pm_runtime_use_autosuspend(dev);
-       pm_runtime_get_noresume(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-
-       retval = driver->probe(bundle, id);
-       if (retval) {
-               /*
-                * Catch buggy drivers that fail to destroy their connections.
-                */
-               WARN_ON(!list_empty(&bundle->connections));
-
-               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
-
-               pm_runtime_disable(dev);
-               pm_runtime_set_suspended(dev);
-               pm_runtime_put_noidle(dev);
-               pm_runtime_dont_use_autosuspend(dev);
-               pm_runtime_put(&bundle->intf->dev);
-
-               return retval;
-       }
-
-       pm_runtime_put(&bundle->intf->dev);
-
-       return 0;
-}
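
The comment above describes the runtime-PM contract greybus_probe() sets up for bundle drivers. Here is a minimal hedged sketch of a driver honouring it, using the pm_runtime_put()/pm_runtime_get_sync() calls the comment names; the example_* functions are hypothetical.

#include <linux/pm_runtime.h>
#include <linux/greybus.h>

static int example_probe(struct gb_bundle *bundle,
                         const struct greybus_bundle_id *id)
{
        /* ... create connections and register child devices ... */

        /* Drop the usage count taken in greybus_probe() to allow suspend. */
        pm_runtime_put(&bundle->dev);

        return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
        /* Rebalance the usage count before tearing things down. */
        pm_runtime_get_sync(&bundle->dev);

        /* ... tear down connections; greybus_remove() does the rest ... */
}
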
-
-static int greybus_remove(struct device *dev)
-{
-       struct greybus_driver *driver = to_greybus_driver(dev->driver);
-       struct gb_bundle *bundle = to_gb_bundle(dev);
-       struct gb_connection *connection;
-       int retval;
-
-       retval = pm_runtime_get_sync(dev);
-       if (retval < 0)
-               dev_err(dev, "failed to resume bundle: %d\n", retval);
-
-       /*
-        * Disable (non-offloaded) connections early in case the interface is
-        * already gone to avoid unnecessary operation timeouts during
-        * driver disconnect. Otherwise, only disable incoming requests.
-        */
-       list_for_each_entry(connection, &bundle->connections, bundle_links) {
-               if (gb_connection_is_offloaded(connection))
-                       continue;
-
-               if (bundle->intf->disconnected)
-                       gb_connection_disable_forced(connection);
-               else
-                       gb_connection_disable_rx(connection);
-       }
-
-       driver->disconnect(bundle);
-
-       /* Catch buggy drivers that fail to destroy their connections. */
-       WARN_ON(!list_empty(&bundle->connections));
-
-       if (!bundle->intf->disconnected)
-               gb_control_bundle_deactivate(bundle->intf->control, bundle->id);
-
-       pm_runtime_put_noidle(dev);
-       pm_runtime_disable(dev);
-       pm_runtime_set_suspended(dev);
-       pm_runtime_dont_use_autosuspend(dev);
-       pm_runtime_put_noidle(dev);
-
-       return 0;
-}
-
-int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
-                           const char *mod_name)
-{
-       int retval;
-
-       if (greybus_disabled())
-               return -ENODEV;
-
-       driver->driver.bus = &greybus_bus_type;
-       driver->driver.name = driver->name;
-       driver->driver.probe = greybus_probe;
-       driver->driver.remove = greybus_remove;
-       driver->driver.owner = owner;
-       driver->driver.mod_name = mod_name;
-
-       retval = driver_register(&driver->driver);
-       if (retval)
-               return retval;
-
-       pr_info("registered new driver %s\n", driver->name);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(greybus_register_driver);
-
-void greybus_deregister_driver(struct greybus_driver *driver)
-{
-       driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL_GPL(greybus_deregister_driver);
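
For completeness, this is how a driver plugs into the bus: a greybus_driver with an id table terminated by an empty entry, exactly what greybus_match_id() above scans for. A hedged sketch follows, reusing the hypothetical example_probe()/example_disconnect() from the earlier runtime-PM sketch; GREYBUS_DEVICE_CLASS(), MODULE_DEVICE_TABLE() and module_greybus_driver() come from the Greybus and module headers.

/* Hypothetical: match any bundle of the loopback class. */
static const struct greybus_bundle_id example_id_table[] = {
        { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
        { }     /* terminating entry, required by greybus_match_id() */
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
        .name           = "example",
        .probe          = example_probe,
        .disconnect     = example_disconnect,
        .id_table       = example_id_table,
};
module_greybus_driver(example_driver);
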
-
-static int __init gb_init(void)
-{
-       int retval;
-
-       if (greybus_disabled())
-               return -ENODEV;
-
-       BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);
-
-       gb_debugfs_init();
-
-       retval = bus_register(&greybus_bus_type);
-       if (retval) {
-               pr_err("bus_register failed (%d)\n", retval);
-               goto error_bus;
-       }
-
-       retval = gb_hd_init();
-       if (retval) {
-               pr_err("gb_hd_init failed (%d)\n", retval);
-               goto error_hd;
-       }
-
-       retval = gb_operation_init();
-       if (retval) {
-               pr_err("gb_operation_init failed (%d)\n", retval);
-               goto error_operation;
-       }
-       return 0;       /* Success */
-
-error_operation:
-       gb_hd_exit();
-error_hd:
-       bus_unregister(&greybus_bus_type);
-error_bus:
-       gb_debugfs_cleanup();
-
-       return retval;
-}
-module_init(gb_init);
-
-static void __exit gb_exit(void)
-{
-       gb_operation_exit();
-       gb_hd_exit();
-       bus_unregister(&greybus_bus_type);
-       gb_debugfs_cleanup();
-       tracepoint_synchronize_unregister();
-}
-module_exit(gb_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
deleted file mode 100644 (file)
index e102d7b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus debugfs code
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#include <linux/debugfs.h>
-#include <linux/greybus.h>
-
-static struct dentry *gb_debug_root;
-
-void __init gb_debugfs_init(void)
-{
-       gb_debug_root = debugfs_create_dir("greybus", NULL);
-}
-
-void gb_debugfs_cleanup(void)
-{
-       debugfs_remove_recursive(gb_debug_root);
-       gb_debug_root = NULL;
-}
-
-struct dentry *gb_debugfs_get(void)
-{
-       return gb_debug_root;
-}
-EXPORT_SYMBOL_GPL(gb_debugfs_get);
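
gb_debugfs_get() is how Greybus drivers hang their own entries under the shared "greybus" debugfs directory. A small hedged sketch; the "example" directory name and counter are invented.

#include <linux/debugfs.h>
#include <linux/greybus.h>

static struct dentry *example_dir;
static u32 example_count;

static void example_debugfs_init(void)
{
        example_dir = debugfs_create_dir("example", gb_debugfs_get());
        debugfs_create_u32("count", 0444, example_dir, &example_count);
}

static void example_debugfs_exit(void)
{
        debugfs_remove_recursive(example_dir);
}
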
index 366716f11b1a5087a6263fe5f0d9d08873839848..5b755e76d8a4dc92a8c9414e8d289401dd0749b0 100644 (file)
@@ -15,7 +15,7 @@
 #include <asm/unaligned.h>
 
 #include "arpc.h"
-#include "greybus_trace.h"
+#include "../../greybus/greybus_trace.h"
 
 
 /* Default timeout for USB vendor requests. */
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
deleted file mode 100644 (file)
index 1bc9f12..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Greybus driver and device API
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM greybus
-
-#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_GREYBUS_H
-
-#include <linux/tracepoint.h>
-
-struct gb_message;
-struct gb_operation;
-struct gb_connection;
-struct gb_bundle;
-struct gb_host_device;
-
-DECLARE_EVENT_CLASS(gb_message,
-
-       TP_PROTO(struct gb_message *message),
-
-       TP_ARGS(message),
-
-       TP_STRUCT__entry(
-               __field(u16, size)
-               __field(u16, operation_id)
-               __field(u8, type)
-               __field(u8, result)
-       ),
-
-       TP_fast_assign(
-               __entry->size = le16_to_cpu(message->header->size);
-               __entry->operation_id =
-                       le16_to_cpu(message->header->operation_id);
-               __entry->type = message->header->type;
-               __entry->result = message->header->result;
-       ),
-
-       TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
-                 __entry->size, __entry->operation_id,
-                 __entry->type, __entry->result)
-);
-
-#define DEFINE_MESSAGE_EVENT(name)                                     \
-               DEFINE_EVENT(gb_message, name,                          \
-                               TP_PROTO(struct gb_message *message),   \
-                               TP_ARGS(message))
-
-/*
- * Occurs immediately before calling a host device's message_send()
- * method.
- */
-DEFINE_MESSAGE_EVENT(gb_message_send);
-
-/*
- * Occurs after an incoming request message has been received
- */
-DEFINE_MESSAGE_EVENT(gb_message_recv_request);
-
-/*
- * Occurs after an incoming response message has been received,
- * after its matching request has been found.
- */
-DEFINE_MESSAGE_EVENT(gb_message_recv_response);
-
-/*
- * Occurs after an operation has been canceled, possibly before the
- * cancellation is complete.
- */
-DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);
-
-/*
- * Occurs when an incoming request is cancelled; if the response has
- * been queued for sending, this occurs after it is sent.
- */
-DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);
-
-/*
- * Occurs in the host driver message_send() function just prior to
- * handing off the data to be processed by hardware.
- */
-DEFINE_MESSAGE_EVENT(gb_message_submit);
-
-#undef DEFINE_MESSAGE_EVENT
-
-DECLARE_EVENT_CLASS(gb_operation,
-
-       TP_PROTO(struct gb_operation *operation),
-
-       TP_ARGS(operation),
-
-       TP_STRUCT__entry(
-               __field(u16, cport_id)  /* CPort of HD side of connection */
-               __field(u16, id)        /* Operation ID */
-               __field(u8, type)
-               __field(unsigned long, flags)
-               __field(int, active)
-               __field(int, waiters)
-               __field(int, errno)
-       ),
-
-       TP_fast_assign(
-               __entry->cport_id = operation->connection->hd_cport_id;
-               __entry->id = operation->id;
-               __entry->type = operation->type;
-               __entry->flags = operation->flags;
-               __entry->active = operation->active;
-               __entry->waiters = atomic_read(&operation->waiters);
-               __entry->errno = operation->errno;
-       ),
-
-       TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
-                 __entry->id, __entry->type, __entry->cport_id, __entry->flags,
-                 __entry->active, __entry->waiters, __entry->errno)
-);
-
-#define DEFINE_OPERATION_EVENT(name)                                   \
-               DEFINE_EVENT(gb_operation, name,                        \
-                               TP_PROTO(struct gb_operation *operation), \
-                               TP_ARGS(operation))
-
-/*
- * Occurs after a new operation for an outgoing request has been
- * successfully created.
- */
-DEFINE_OPERATION_EVENT(gb_operation_create);
-
-/*
- * Occurs after a new core operation has been created.
- */
-DEFINE_OPERATION_EVENT(gb_operation_create_core);
-
-/*
- * Occurs after a new operation for an incoming request has been
- * successfully created and initialized.
- */
-DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
-
-/*
- * Occurs when the last reference to an operation has been dropped,
- * prior to freeing resources.
- */
-DEFINE_OPERATION_EVENT(gb_operation_destroy);
-
-/*
- * Occurs when an operation has been marked active, after updating
- * its active count.
- */
-DEFINE_OPERATION_EVENT(gb_operation_get_active);
-
-/*
- * Occurs when an operation is being marked inactive, before updating
- * its active count.
- */
-DEFINE_OPERATION_EVENT(gb_operation_put_active);
-
-#undef DEFINE_OPERATION_EVENT
-
-DECLARE_EVENT_CLASS(gb_connection,
-
-       TP_PROTO(struct gb_connection *connection),
-
-       TP_ARGS(connection),
-
-       TP_STRUCT__entry(
-               __field(int, hd_bus_id)
-               __field(u8, bundle_id)
-               /* name contains "hd_cport_id/intf_id:cport_id" */
-               __dynamic_array(char, name, sizeof(connection->name))
-               __field(enum gb_connection_state, state)
-               __field(unsigned long, flags)
-       ),
-
-       TP_fast_assign(
-               __entry->hd_bus_id = connection->hd->bus_id;
-               __entry->bundle_id = connection->bundle ?
-                               connection->bundle->id : BUNDLE_ID_NONE;
-               memcpy(__get_str(name), connection->name,
-                                       sizeof(connection->name));
-               __entry->state = connection->state;
-               __entry->flags = connection->flags;
-       ),
-
-       TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
-                 __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
-                 (unsigned int)__entry->state, __entry->flags)
-);
-
-#define DEFINE_CONNECTION_EVENT(name)                                  \
-               DEFINE_EVENT(gb_connection, name,                       \
-                               TP_PROTO(struct gb_connection *connection), \
-                               TP_ARGS(connection))
-
-/*
- * Occurs after a new connection is successfully created.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_create);
-
-/*
- * Occurs when the last reference to a connection has been dropped,
- * before its resources are freed.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_release);
-
-/*
- * Occurs when a new reference to a connection is added, currently
- * only when a message over the connection is received.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_get);
-
-/*
- * Occurs when a reference to a connection is dropped, after a
- * received message is handled, or when the connection is
- * destroyed.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_put);
-
-/*
- * Occurs when a request to enable a connection is made, either for
- * transmit only, or for both transmit and receive.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_enable);
-
-/*
- * Occurs when a request to disable a connection is made, either for
- * receive only, or for both transmit and receive.  Also occurs when
- * a request to forcefully disable a connection is made.
- */
-DEFINE_CONNECTION_EVENT(gb_connection_disable);
-
-#undef DEFINE_CONNECTION_EVENT
-
-DECLARE_EVENT_CLASS(gb_bundle,
-
-       TP_PROTO(struct gb_bundle *bundle),
-
-       TP_ARGS(bundle),
-
-       TP_STRUCT__entry(
-               __field(u8, intf_id)
-               __field(u8, id)
-               __field(u8, class)
-               __field(size_t, num_cports)
-       ),
-
-       TP_fast_assign(
-               __entry->intf_id = bundle->intf->interface_id;
-               __entry->id = bundle->id;
-               __entry->class = bundle->class;
-               __entry->num_cports = bundle->num_cports;
-       ),
-
-       TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
-                 __entry->intf_id, __entry->id, __entry->class,
-                 __entry->num_cports)
-);
-
-#define DEFINE_BUNDLE_EVENT(name)                                      \
-               DEFINE_EVENT(gb_bundle, name,                   \
-                               TP_PROTO(struct gb_bundle *bundle), \
-                               TP_ARGS(bundle))
-
-/*
- * Occurs after a new bundle is successfully created.
- */
-DEFINE_BUNDLE_EVENT(gb_bundle_create);
-
-/*
- * Occurs when the last reference to a bundle has been dropped,
- * before its resources are freed.
- */
-DEFINE_BUNDLE_EVENT(gb_bundle_release);
-
-/*
- * Occurs when a bundle is added to an interface when the interface
- * is enabled.
- */
-DEFINE_BUNDLE_EVENT(gb_bundle_add);
-
-/*
- * Occurs when a registered bundle gets destroyed, normally at the
- * time an interface is disabled.
- */
-DEFINE_BUNDLE_EVENT(gb_bundle_destroy);
-
-#undef DEFINE_BUNDLE_EVENT
-
-DECLARE_EVENT_CLASS(gb_interface,
-
-       TP_PROTO(struct gb_interface *intf),
-
-       TP_ARGS(intf),
-
-       TP_STRUCT__entry(
-               __field(u8, module_id)
-               __field(u8, id)         /* Interface id */
-               __field(u8, device_id)
-               __field(int, disconnected)      /* bool */
-               __field(int, ejected)           /* bool */
-               __field(int, active)            /* bool */
-               __field(int, enabled)           /* bool */
-               __field(int, mode_switch)       /* bool */
-       ),
-
-       TP_fast_assign(
-               __entry->module_id = intf->module->module_id;
-               __entry->id = intf->interface_id;
-               __entry->device_id = intf->device_id;
-               __entry->disconnected = intf->disconnected;
-               __entry->ejected = intf->ejected;
-               __entry->active = intf->active;
-               __entry->enabled = intf->enabled;
-               __entry->mode_switch = intf->mode_switch;
-       ),
-
-       TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
-               __entry->id, __entry->device_id, __entry->module_id,
-               __entry->disconnected, __entry->ejected, __entry->active,
-               __entry->enabled, __entry->mode_switch)
-);
-
-#define DEFINE_INTERFACE_EVENT(name)                                   \
-               DEFINE_EVENT(gb_interface, name,                        \
-                               TP_PROTO(struct gb_interface *intf),    \
-                               TP_ARGS(intf))
-
-/*
- * Occurs after a new interface is successfully created.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_create);
-
-/*
- * Occurs after the last reference to an interface has been dropped.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_release);
-
-/*
- * Occurs after an interface has been registered.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_add);
-
-/*
- * Occurs when a registered interface gets deregistered.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_del);
-
-/*
- * Occurs when a registered interface has been successfully
- * activated.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_activate);
-
-/*
- * Occurs when an activated interface is being deactivated.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_deactivate);
-
-/*
- * Occurs when an interface has been successfully enabled.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_enable);
-
-/*
- * Occurs when an enabled interface is being disabled.
- */
-DEFINE_INTERFACE_EVENT(gb_interface_disable);
-
-#undef DEFINE_INTERFACE_EVENT
-
-DECLARE_EVENT_CLASS(gb_module,
-
-       TP_PROTO(struct gb_module *module),
-
-       TP_ARGS(module),
-
-       TP_STRUCT__entry(
-               __field(int, hd_bus_id)
-               __field(u8, module_id)
-               __field(size_t, num_interfaces)
-               __field(int, disconnected)      /* bool */
-       ),
-
-       TP_fast_assign(
-               __entry->hd_bus_id = module->hd->bus_id;
-               __entry->module_id = module->module_id;
-               __entry->num_interfaces = module->num_interfaces;
-               __entry->disconnected = module->disconnected;
-       ),
-
-       TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
-               __entry->hd_bus_id, __entry->module_id,
-               __entry->num_interfaces, __entry->disconnected)
-);
-
-#define DEFINE_MODULE_EVENT(name)                                      \
-               DEFINE_EVENT(gb_module, name,                           \
-                               TP_PROTO(struct gb_module *module),     \
-                               TP_ARGS(module))
-
-/*
- * Occurs after a new module is successfully created, before
- * creating any of its interfaces.
- */
-DEFINE_MODULE_EVENT(gb_module_create);
-
-/*
- * Occurs after the last reference to a module has been dropped.
- */
-DEFINE_MODULE_EVENT(gb_module_release);
-
-/*
- * Occurs after a module is successfully created, before registering
- * any of its interfaces.
- */
-DEFINE_MODULE_EVENT(gb_module_add);
-
-/*
- * Occurs when a module is deleted, before deregistering its
- * interfaces.
- */
-DEFINE_MODULE_EVENT(gb_module_del);
-
-#undef DEFINE_MODULE_EVENT
-
-DECLARE_EVENT_CLASS(gb_host_device,
-
-       TP_PROTO(struct gb_host_device *hd),
-
-       TP_ARGS(hd),
-
-       TP_STRUCT__entry(
-               __field(int, bus_id)
-               __field(size_t, num_cports)
-               __field(size_t, buffer_size_max)
-       ),
-
-       TP_fast_assign(
-               __entry->bus_id = hd->bus_id;
-               __entry->num_cports = hd->num_cports;
-               __entry->buffer_size_max = hd->buffer_size_max;
-       ),
-
-       TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
-               __entry->bus_id, __entry->num_cports,
-               __entry->buffer_size_max)
-);
-
-#define DEFINE_HD_EVENT(name)                                          \
-               DEFINE_EVENT(gb_host_device, name,                      \
-                               TP_PROTO(struct gb_host_device *hd),    \
-                               TP_ARGS(hd))
-
-/*
- * Occurs after a new host device is successfully created, before
- * its SVC has been set up.
- */
-DEFINE_HD_EVENT(gb_hd_create);
-
-/*
- * Occurs after the last reference to a host device has been
- * dropped.
- */
-DEFINE_HD_EVENT(gb_hd_release);
-
-/*
- * Occurs after a new host device has been added, after the
- * connection to its SVC has been enabled.
- */
-DEFINE_HD_EVENT(gb_hd_add);
-
-/*
- * Occurs when a host device is being disconnected from the AP USB
- * host controller.
- */
-DEFINE_HD_EVENT(gb_hd_del);
-
-/*
- * Occurs when a host device has passed received data to the Greybus
- * core, after it has been determined it is destined for a valid
- * CPort.
- */
-DEFINE_HD_EVENT(gb_hd_in);
-
-#undef DEFINE_HD_EVENT
-
-#endif /* _TRACE_GREYBUS_H */
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-
-/*
- * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
- */
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE greybus_trace
-#include <trace/define_trace.h>
-
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
deleted file mode 100644 (file)
index 72b21bf..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Host Device
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/greybus.h>
-
-#include "greybus_trace.h"
-
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
-EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
-
-static struct ida gb_hd_bus_id_map;
-
-int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
-                bool async)
-{
-       if (!hd || !hd->driver || !hd->driver->output)
-               return -EINVAL;
-       return hd->driver->output(hd, req, size, cmd, async);
-}
-EXPORT_SYMBOL_GPL(gb_hd_output);
-
-static ssize_t bus_id_show(struct device *dev,
-                          struct device_attribute *attr, char *buf)
-{
-       struct gb_host_device *hd = to_gb_host_device(dev);
-
-       return sprintf(buf, "%d\n", hd->bus_id);
-}
-static DEVICE_ATTR_RO(bus_id);
-
-static struct attribute *bus_attrs[] = {
-       &dev_attr_bus_id.attr,
-       NULL
-};
-ATTRIBUTE_GROUPS(bus);
-
-int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
-{
-       struct ida *id_map = &hd->cport_id_map;
-       int ret;
-
-       ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
-       if (ret < 0) {
-               dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
-               return ret;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
-
-void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
-{
-       struct ida *id_map = &hd->cport_id_map;
-
-       ida_simple_remove(id_map, cport_id);
-}
-EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
-
-/* Locking: Caller guarantees serialisation */
-int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
-                        unsigned long flags)
-{
-       struct ida *id_map = &hd->cport_id_map;
-       int ida_start, ida_end;
-
-       if (hd->driver->cport_allocate)
-               return hd->driver->cport_allocate(hd, cport_id, flags);
-
-       if (cport_id < 0) {
-               ida_start = 0;
-               ida_end = hd->num_cports;
-       } else if (cport_id < hd->num_cports) {
-               ida_start = cport_id;
-               ida_end = cport_id + 1;
-       } else {
-               dev_err(&hd->dev, "cport %d not available\n", cport_id);
-               return -EINVAL;
-       }
-
-       return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
-}
-
-/* Locking: Caller guarantees serialisation */
-void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
-{
-       if (hd->driver->cport_release) {
-               hd->driver->cport_release(hd, cport_id);
-               return;
-       }
-
-       ida_simple_remove(&hd->cport_id_map, cport_id);
-}
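
The CPort helpers above form two pairs: gb_hd_cport_reserve()/gb_hd_cport_release_reserved() let a host driver pin a well-known CPort id before any connection exists, while gb_hd_cport_allocate()/gb_hd_cport_release() serve connection setup and defer to driver callbacks when the hardware manages its own ids. A minimal sketch of the exported pair, assuming a hypothetical host driver that keeps CPort 0 for its SVC connection:

    static int example_reserve_svc_cport(struct gb_host_device *hd)
    {
            int ret;

            /* Pin CPort 0 so dynamic allocation can never hand it out. */
            ret = gb_hd_cport_reserve(hd, 0);
            if (ret)
                    return ret;

            /* ... use CPort 0 for the SVC connection ... */

            /* Drop the reservation again on teardown. */
            gb_hd_cport_release_reserved(hd, 0);

            return 0;
    }
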
-
-static void gb_hd_release(struct device *dev)
-{
-       struct gb_host_device *hd = to_gb_host_device(dev);
-
-       trace_gb_hd_release(hd);
-
-       if (hd->svc)
-               gb_svc_put(hd->svc);
-       ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
-       ida_destroy(&hd->cport_id_map);
-       kfree(hd);
-}
-
-struct device_type greybus_hd_type = {
-       .name           = "greybus_host_device",
-       .release        = gb_hd_release,
-};
-
-struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
-                                   struct device *parent,
-                                   size_t buffer_size_max,
-                                   size_t num_cports)
-{
-       struct gb_host_device *hd;
-       int ret;
-
-       /*
-        * Validate that the driver implements all of the callbacks
-        * so that we don't have to check every time we call them.
-        */
-       if ((!driver->message_send) || (!driver->message_cancel)) {
-               dev_err(parent, "mandatory hd-callbacks missing\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
-               dev_err(parent, "greybus host-device buffers too small\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
-               dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /*
-        * Make sure to never allocate messages larger than what the Greybus
-        * protocol supports.
-        */
-       if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
-               dev_warn(parent, "limiting buffer size to %u\n",
-                        GB_OPERATION_MESSAGE_SIZE_MAX);
-               buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
-       }
-
-       hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
-       if (!hd)
-               return ERR_PTR(-ENOMEM);
-
-       ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
-       if (ret < 0) {
-               kfree(hd);
-               return ERR_PTR(ret);
-       }
-       hd->bus_id = ret;
-
-       hd->driver = driver;
-       INIT_LIST_HEAD(&hd->modules);
-       INIT_LIST_HEAD(&hd->connections);
-       ida_init(&hd->cport_id_map);
-       hd->buffer_size_max = buffer_size_max;
-       hd->num_cports = num_cports;
-
-       hd->dev.parent = parent;
-       hd->dev.bus = &greybus_bus_type;
-       hd->dev.type = &greybus_hd_type;
-       hd->dev.groups = bus_groups;
-       hd->dev.dma_mask = hd->dev.parent->dma_mask;
-       device_initialize(&hd->dev);
-       dev_set_name(&hd->dev, "greybus%d", hd->bus_id);
-
-       trace_gb_hd_create(hd);
-
-       hd->svc = gb_svc_create(hd);
-       if (!hd->svc) {
-               dev_err(&hd->dev, "failed to create svc\n");
-               put_device(&hd->dev);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       return hd;
-}
-EXPORT_SYMBOL_GPL(gb_hd_create);
-
-int gb_hd_add(struct gb_host_device *hd)
-{
-       int ret;
-
-       ret = device_add(&hd->dev);
-       if (ret)
-               return ret;
-
-       ret = gb_svc_add(hd->svc);
-       if (ret) {
-               device_del(&hd->dev);
-               return ret;
-       }
-
-       trace_gb_hd_add(hd);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(gb_hd_add);
-
-void gb_hd_del(struct gb_host_device *hd)
-{
-       trace_gb_hd_del(hd);
-
-       /*
-        * Tear down the svc and flush any on-going hotplug processing before
-        * removing the remaining interfaces.
-        */
-       gb_svc_del(hd->svc);
-
-       device_del(&hd->dev);
-}
-EXPORT_SYMBOL_GPL(gb_hd_del);
-
-void gb_hd_shutdown(struct gb_host_device *hd)
-{
-       gb_svc_del(hd->svc);
-}
-EXPORT_SYMBOL_GPL(gb_hd_shutdown);
-
-void gb_hd_put(struct gb_host_device *hd)
-{
-       put_device(&hd->dev);
-}
-EXPORT_SYMBOL_GPL(gb_hd_put);
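
Taken together, the exported helpers in this file give a host-controller driver (such as es2) a create/add/del/put lifecycle. A minimal sketch, assuming hypothetical example_* callbacks and a hypothetical private struct; the buffer size and CPort count are illustrative:

    static struct gb_hd_driver example_hd_driver = {
            .hd_priv_size   = sizeof(struct example_hd_priv), /* hypothetical */
            .message_send   = example_message_send,           /* mandatory */
            .message_cancel = example_message_cancel,         /* mandatory */
    };

    static int example_probe(struct device *parent)
    {
            struct gb_host_device *hd;
            int ret;

            /* 2048-byte buffers and 16 CPorts are illustrative values. */
            hd = gb_hd_create(&example_hd_driver, parent, 2048, 16);
            if (IS_ERR(hd))
                    return PTR_ERR(hd);

            ret = gb_hd_add(hd);
            if (ret) {
                    gb_hd_put(hd);
                    return ret;
            }

            return 0;
    }

    static void example_disconnect(struct gb_host_device *hd)
    {
            gb_hd_del(hd);
            gb_hd_put(hd);
    }
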
-
-int __init gb_hd_init(void)
-{
-       ida_init(&gb_hd_bus_id_map);
-
-       return 0;
-}
-
-void gb_hd_exit(void)
-{
-       ida_destroy(&gb_hd_bus_id_map);
-}
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
deleted file mode 100644 (file)
index 67dbe6f..0000000
+++ /dev/null
@@ -1,1263 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus interface code
- *
- * Copyright 2014 Google Inc.
- * Copyright 2014 Linaro Ltd.
- */
-
-#include <linux/delay.h>
-#include <linux/greybus.h>
-
-#include "greybus_trace.h"
-
-#define GB_INTERFACE_MODE_SWITCH_TIMEOUT       2000
-
-#define GB_INTERFACE_DEVICE_ID_BAD     0xff
-
-#define GB_INTERFACE_AUTOSUSPEND_MS                    3000
-
-/* Time required for interface to enter standby before disabling REFCLK */
-#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS                        20
-
-/* Don't-care selector index */
-#define DME_SELECTOR_INDEX_NULL                0
-
-/* DME attributes */
-/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
-#define DME_T_TST_SRC_INCREMENT                0x4083
-
-#define DME_DDBL1_MANUFACTURERID       0x5003
-#define DME_DDBL1_PRODUCTID            0x5004
-
-#define DME_TOSHIBA_GMP_VID            0x6000
-#define DME_TOSHIBA_GMP_PID            0x6001
-#define DME_TOSHIBA_GMP_SN0            0x6002
-#define DME_TOSHIBA_GMP_SN1            0x6003
-#define DME_TOSHIBA_GMP_INIT_STATUS    0x6101
-
-/* DDBL1 Manufacturer and Product ids */
-#define TOSHIBA_DMID                   0x0126
-#define TOSHIBA_ES2_BRIDGE_DPID                0x1000
-#define TOSHIBA_ES3_APBRIDGE_DPID      0x1001
-#define TOSHIBA_ES3_GBPHY_DPID 0x1002
-
-static int gb_interface_hibernate_link(struct gb_interface *intf);
-static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
-
-static int gb_interface_dme_attr_get(struct gb_interface *intf,
-                                    u16 attr, u32 *val)
-{
-       return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
-                                       attr, DME_SELECTOR_INDEX_NULL, val);
-}
-
-static int gb_interface_read_ara_dme(struct gb_interface *intf)
-{
-       u32 sn0, sn1;
-       int ret;
-
-       /*
-        * Unless this is a Toshiba bridge, bail out until we have defined
-        * standard GMP attributes.
-        */
-       if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
-               dev_err(&intf->dev, "unknown manufacturer %08x\n",
-                       intf->ddbl1_manufacturer_id);
-               return -ENODEV;
-       }
-
-       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
-                                       &intf->vendor_id);
-       if (ret)
-               return ret;
-
-       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
-                                       &intf->product_id);
-       if (ret)
-               return ret;
-
-       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
-       if (ret)
-               return ret;
-
-       ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
-       if (ret)
-               return ret;
-
-       intf->serial_number = (u64)sn1 << 32 | sn0;
-
-       return 0;
-}
-
-static int gb_interface_read_dme(struct gb_interface *intf)
-{
-       int ret;
-
-       /* DME attributes have already been read */
-       if (intf->dme_read)
-               return 0;
-
-       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
-                                       &intf->ddbl1_manufacturer_id);
-       if (ret)
-               return ret;
-
-       ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
-                                       &intf->ddbl1_product_id);
-       if (ret)
-               return ret;
-
-       if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
-           intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
-               intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
-               intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
-       }
-
-       ret = gb_interface_read_ara_dme(intf);
-       if (ret)
-               return ret;
-
-       intf->dme_read = true;
-
-       return 0;
-}
-
-static int gb_interface_route_create(struct gb_interface *intf)
-{
-       struct gb_svc *svc = intf->hd->svc;
-       u8 intf_id = intf->interface_id;
-       u8 device_id;
-       int ret;
-
-       /* Allocate an interface device id. */
-       ret = ida_simple_get(&svc->device_id_map,
-                            GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
-                            GFP_KERNEL);
-       if (ret < 0) {
-               dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
-               return ret;
-       }
-       device_id = ret;
-
-       ret = gb_svc_intf_device_id(svc, intf_id, device_id);
-       if (ret) {
-               dev_err(&intf->dev, "failed to set device id %u: %d\n",
-                       device_id, ret);
-               goto err_ida_remove;
-       }
-
-       /* FIXME: Hard-coded AP device id. */
-       ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
-                                 intf_id, device_id);
-       if (ret) {
-               dev_err(&intf->dev, "failed to create route: %d\n", ret);
-               goto err_svc_id_free;
-       }
-
-       intf->device_id = device_id;
-
-       return 0;
-
-err_svc_id_free:
-       /*
-        * XXX Should we tell SVC that this id doesn't belong to interface
-        * XXX anymore.
-        */
-err_ida_remove:
-       ida_simple_remove(&svc->device_id_map, device_id);
-
-       return ret;
-}
-
-static void gb_interface_route_destroy(struct gb_interface *intf)
-{
-       struct gb_svc *svc = intf->hd->svc;
-
-       if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
-               return;
-
-       gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
-       ida_simple_remove(&svc->device_id_map, intf->device_id);
-       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
-}
-
-/* Locking: Caller holds the interface mutex. */
-static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
-{
-       int ret;
-
-       dev_info(&intf->dev, "legacy mode switch detected\n");
-
-       /* Mark as disconnected to prevent I/O during disable. */
-       intf->disconnected = true;
-       gb_interface_disable(intf);
-       intf->disconnected = false;
-
-       ret = gb_interface_enable(intf);
-       if (ret) {
-               dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
-               gb_interface_deactivate(intf);
-       }
-
-       return ret;
-}
-
-void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
-                               u32 mailbox)
-{
-       mutex_lock(&intf->mutex);
-
-       if (result) {
-               dev_warn(&intf->dev,
-                        "mailbox event with UniPro error: 0x%04x\n",
-                        result);
-               goto err_disable;
-       }
-
-       if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
-               dev_warn(&intf->dev,
-                        "mailbox event with unexpected value: 0x%08x\n",
-                        mailbox);
-               goto err_disable;
-       }
-
-       if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
-               gb_interface_legacy_mode_switch(intf);
-               goto out_unlock;
-       }
-
-       if (!intf->mode_switch) {
-               dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
-                        mailbox);
-               goto err_disable;
-       }
-
-       dev_info(&intf->dev, "mode switch detected\n");
-
-       complete(&intf->mode_switch_completion);
-
-out_unlock:
-       mutex_unlock(&intf->mutex);
-
-       return;
-
-err_disable:
-       gb_interface_disable(intf);
-       gb_interface_deactivate(intf);
-       mutex_unlock(&intf->mutex);
-}
-
-static void gb_interface_mode_switch_work(struct work_struct *work)
-{
-       struct gb_interface *intf;
-       struct gb_control *control;
-       unsigned long timeout;
-       int ret;
-
-       intf = container_of(work, struct gb_interface, mode_switch_work);
-
-       mutex_lock(&intf->mutex);
-       /* Make sure interface is still enabled. */
-       if (!intf->enabled) {
-               dev_dbg(&intf->dev, "mode switch aborted\n");
-               intf->mode_switch = false;
-               mutex_unlock(&intf->mutex);
-               goto out_interface_put;
-       }
-
-       /*
-        * Prepare the control device for mode switch and make sure to get an
-        * extra reference before it goes away during interface disable.
-        */
-       control = gb_control_get(intf->control);
-       gb_control_mode_switch_prepare(control);
-       gb_interface_disable(intf);
-       mutex_unlock(&intf->mutex);
-
-       timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
-       ret = wait_for_completion_interruptible_timeout(
-                       &intf->mode_switch_completion, timeout);
-
-       /* Finalise control-connection mode switch. */
-       gb_control_mode_switch_complete(control);
-       gb_control_put(control);
-
-       if (ret < 0) {
-               dev_err(&intf->dev, "mode switch interrupted\n");
-               goto err_deactivate;
-       } else if (ret == 0) {
-               dev_err(&intf->dev, "mode switch timed out\n");
-               goto err_deactivate;
-       }
-
-       /* Re-enable (re-enumerate) interface if still active. */
-       mutex_lock(&intf->mutex);
-       intf->mode_switch = false;
-       if (intf->active) {
-               ret = gb_interface_enable(intf);
-               if (ret) {
-                       dev_err(&intf->dev, "failed to re-enable interface: %d\n",
-                               ret);
-                       gb_interface_deactivate(intf);
-               }
-       }
-       mutex_unlock(&intf->mutex);
-
-out_interface_put:
-       gb_interface_put(intf);
-
-       return;
-
-err_deactivate:
-       mutex_lock(&intf->mutex);
-       intf->mode_switch = false;
-       gb_interface_deactivate(intf);
-       mutex_unlock(&intf->mutex);
-
-       gb_interface_put(intf);
-}
-
-int gb_interface_request_mode_switch(struct gb_interface *intf)
-{
-       int ret = 0;
-
-       mutex_lock(&intf->mutex);
-       if (intf->mode_switch) {
-               ret = -EBUSY;
-               goto out_unlock;
-       }
-
-       intf->mode_switch = true;
-       reinit_completion(&intf->mode_switch_completion);
-
-       /*
-        * Get a reference to the interface device, which will be put once the
-        * mode switch is complete.
-        */
-       get_device(&intf->dev);
-
-       if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
-               put_device(&intf->dev);
-               ret = -EBUSY;
-               goto out_unlock;
-       }
-
-out_unlock:
-       mutex_unlock(&intf->mutex);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
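
Note that gb_interface_request_mode_switch() only flags the switch and queues the work; the disable/handshake/re-enable sequence runs later in gb_interface_mode_switch_work(). A hedged sketch of a caller, e.g. a firmware-management driver that wants the interface re-enumerated after loading a new image (the function name is hypothetical):

    static int example_reenumerate(struct gb_interface *intf)
    {
            int ret;

            ret = gb_interface_request_mode_switch(intf);
            if (ret == -EBUSY)
                    dev_warn(&intf->dev, "mode switch already in progress\n");

            return ret;
    }
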
-
-/*
- * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
- * init-status attribute DME_TOSHIBA_GMP_INIT_STATUS. The AP needs to read and
- * clear it after reading a non-zero value from it.
- *
- * FIXME: This is module-hardware dependent and needs to be extended for every
- * type of module we want to support.
- */
-static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
-{
-       struct gb_host_device *hd = intf->hd;
-       unsigned long bootrom_quirks;
-       unsigned long s2l_quirks;
-       int ret;
-       u32 value;
-       u16 attr;
-       u8 init_status;
-
-       /*
-        * ES2 bridges use T_TstSrcIncrement for the init status.
-        *
-        * FIXME: Remove ES2 support
-        */
-       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
-               attr = DME_T_TST_SRC_INCREMENT;
-       else
-               attr = DME_TOSHIBA_GMP_INIT_STATUS;
-
-       ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
-                                 DME_SELECTOR_INDEX_NULL, &value);
-       if (ret)
-               return ret;
-
-       /*
-        * A nonzero init status indicates the module has finished
-        * initializing.
-        */
-       if (!value) {
-               dev_err(&intf->dev, "invalid init status\n");
-               return -ENODEV;
-       }
-
-       /*
-        * Extract the init status.
-        *
-        * For ES2: the status is in the lowest 8 bits of 'value'.
-        * For ES3: the status is in the highest 8 bits of the 32-bit 'value'.
-        *
-        * FIXME: Remove ES2 support
-        */
-       if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
-               init_status = value & 0xff;
-       else
-               init_status = value >> 24;
-
-       /*
-        * Check if the interface is executing the quirky ES3 bootrom that,
-        * for example, requires E2EFC, CSD and CSV to be disabled.
-        */
-       bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
-                               GB_INTERFACE_QUIRK_FORCED_DISABLE |
-                               GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
-                               GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;
-
-       s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;
-
-       switch (init_status) {
-       case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
-       case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
-               intf->quirks |= bootrom_quirks;
-               break;
-       case GB_INIT_S2_LOADER_BOOT_STARTED:
-               /* S2 Loader doesn't support runtime PM */
-               intf->quirks &= ~bootrom_quirks;
-               intf->quirks |= s2l_quirks;
-               break;
-       default:
-               intf->quirks &= ~bootrom_quirks;
-               intf->quirks &= ~s2l_quirks;
-       }
-
-       /* Clear the init status. */
-       return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
-                                  DME_SELECTOR_INDEX_NULL, 0);
-}
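
A worked example of the extraction above: an ES3-class bridge reporting status 0x0d returns a raw DME 'value' of 0x0d000000, so init_status = value >> 24 = 0x0d; a quirky ES2 part reporting the same status returns 0x0000000d instead, so init_status = value & 0xff = 0x0d.
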
-
-/* interface sysfs attributes */
-#define gb_interface_attr(field, type)                                 \
-static ssize_t field##_show(struct device *dev,                                \
-                           struct device_attribute *attr,              \
-                           char *buf)                                  \
-{                                                                      \
-       struct gb_interface *intf = to_gb_interface(dev);               \
-       return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);        \
-}                                                                      \
-static DEVICE_ATTR_RO(field)
-
-gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
-gb_interface_attr(ddbl1_product_id, "0x%08x");
-gb_interface_attr(interface_id, "%u");
-gb_interface_attr(vendor_id, "0x%08x");
-gb_interface_attr(product_id, "0x%08x");
-gb_interface_attr(serial_number, "0x%016llx");
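
For reference, the first instantiation above, gb_interface_attr(ddbl1_manufacturer_id, "0x%08x"), expands to roughly the following show function and read-only device attribute:

    static ssize_t ddbl1_manufacturer_id_show(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
    {
            struct gb_interface *intf = to_gb_interface(dev);

            return scnprintf(buf, PAGE_SIZE, "0x%08x\n",
                             intf->ddbl1_manufacturer_id);
    }
    static DEVICE_ATTR_RO(ddbl1_manufacturer_id);
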
-
-static ssize_t voltage_now_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       int ret;
-       u32 measurement;
-
-       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
-                                           GB_SVC_PWRMON_TYPE_VOL,
-                                           &measurement);
-       if (ret) {
-               dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
-               return ret;
-       }
-
-       return sprintf(buf, "%u\n", measurement);
-}
-static DEVICE_ATTR_RO(voltage_now);
-
-static ssize_t current_now_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       int ret;
-       u32 measurement;
-
-       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
-                                           GB_SVC_PWRMON_TYPE_CURR,
-                                           &measurement);
-       if (ret) {
-               dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
-               return ret;
-       }
-
-       return sprintf(buf, "%u\n", measurement);
-}
-static DEVICE_ATTR_RO(current_now);
-
-static ssize_t power_now_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       int ret;
-       u32 measurement;
-
-       ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
-                                           GB_SVC_PWRMON_TYPE_PWR,
-                                           &measurement);
-       if (ret) {
-               dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
-               return ret;
-       }
-
-       return sprintf(buf, "%u\n", measurement);
-}
-static DEVICE_ATTR_RO(power_now);
-
-static ssize_t power_state_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       if (intf->active)
-               return scnprintf(buf, PAGE_SIZE, "on\n");
-       else
-               return scnprintf(buf, PAGE_SIZE, "off\n");
-}
-
-static ssize_t power_state_store(struct device *dev,
-                                struct device_attribute *attr, const char *buf,
-                                size_t len)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       bool activate;
-       int ret = 0;
-
-       if (kstrtobool(buf, &activate))
-               return -EINVAL;
-
-       mutex_lock(&intf->mutex);
-
-       if (activate == intf->active)
-               goto unlock;
-
-       if (activate) {
-               ret = gb_interface_activate(intf);
-               if (ret) {
-                       dev_err(&intf->dev,
-                               "failed to activate interface: %d\n", ret);
-                       goto unlock;
-               }
-
-               ret = gb_interface_enable(intf);
-               if (ret) {
-                       dev_err(&intf->dev,
-                               "failed to enable interface: %d\n", ret);
-                       gb_interface_deactivate(intf);
-                       goto unlock;
-               }
-       } else {
-               gb_interface_disable(intf);
-               gb_interface_deactivate(intf);
-       }
-
-unlock:
-       mutex_unlock(&intf->mutex);
-
-       if (ret)
-               return ret;
-
-       return len;
-}
-static DEVICE_ATTR_RW(power_state);
-
-static const char *gb_interface_type_string(struct gb_interface *intf)
-{
-       static const char * const types[] = {
-               [GB_INTERFACE_TYPE_INVALID] = "invalid",
-               [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
-               [GB_INTERFACE_TYPE_DUMMY] = "dummy",
-               [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
-               [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
-       };
-
-       return types[intf->type];
-}
-
-static ssize_t interface_type_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       return sprintf(buf, "%s\n", gb_interface_type_string(intf));
-}
-static DEVICE_ATTR_RO(interface_type);
-
-static struct attribute *interface_unipro_attrs[] = {
-       &dev_attr_ddbl1_manufacturer_id.attr,
-       &dev_attr_ddbl1_product_id.attr,
-       NULL
-};
-
-static struct attribute *interface_greybus_attrs[] = {
-       &dev_attr_vendor_id.attr,
-       &dev_attr_product_id.attr,
-       &dev_attr_serial_number.attr,
-       NULL
-};
-
-static struct attribute *interface_power_attrs[] = {
-       &dev_attr_voltage_now.attr,
-       &dev_attr_current_now.attr,
-       &dev_attr_power_now.attr,
-       &dev_attr_power_state.attr,
-       NULL
-};
-
-static struct attribute *interface_common_attrs[] = {
-       &dev_attr_interface_id.attr,
-       &dev_attr_interface_type.attr,
-       NULL
-};
-
-static umode_t interface_unipro_is_visible(struct kobject *kobj,
-                                          struct attribute *attr, int n)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       switch (intf->type) {
-       case GB_INTERFACE_TYPE_UNIPRO:
-       case GB_INTERFACE_TYPE_GREYBUS:
-               return attr->mode;
-       default:
-               return 0;
-       }
-}
-
-static umode_t interface_greybus_is_visible(struct kobject *kobj,
-                                           struct attribute *attr, int n)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       switch (intf->type) {
-       case GB_INTERFACE_TYPE_GREYBUS:
-               return attr->mode;
-       default:
-               return 0;
-       }
-}
-
-static umode_t interface_power_is_visible(struct kobject *kobj,
-                                         struct attribute *attr, int n)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       switch (intf->type) {
-       case GB_INTERFACE_TYPE_UNIPRO:
-       case GB_INTERFACE_TYPE_GREYBUS:
-               return attr->mode;
-       default:
-               return 0;
-       }
-}
-
-static const struct attribute_group interface_unipro_group = {
-       .is_visible     = interface_unipro_is_visible,
-       .attrs          = interface_unipro_attrs,
-};
-
-static const struct attribute_group interface_greybus_group = {
-       .is_visible     = interface_greybus_is_visible,
-       .attrs          = interface_greybus_attrs,
-};
-
-static const struct attribute_group interface_power_group = {
-       .is_visible     = interface_power_is_visible,
-       .attrs          = interface_power_attrs,
-};
-
-static const struct attribute_group interface_common_group = {
-       .attrs          = interface_common_attrs,
-};
-
-static const struct attribute_group *interface_groups[] = {
-       &interface_unipro_group,
-       &interface_greybus_group,
-       &interface_power_group,
-       &interface_common_group,
-       NULL
-};
-
-static void gb_interface_release(struct device *dev)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-
-       trace_gb_interface_release(intf);
-
-       kfree(intf);
-}
-
-#ifdef CONFIG_PM
-static int gb_interface_suspend(struct device *dev)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       int ret;
-
-       ret = gb_control_interface_suspend_prepare(intf->control);
-       if (ret)
-               return ret;
-
-       ret = gb_control_suspend(intf->control);
-       if (ret)
-               goto err_hibernate_abort;
-
-       ret = gb_interface_hibernate_link(intf);
-       if (ret)
-               return ret;
-
-       /* Delay to allow interface to enter standby before disabling refclk */
-       msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);
-
-       ret = gb_interface_refclk_set(intf, false);
-       if (ret)
-               return ret;
-
-       return 0;
-
-err_hibernate_abort:
-       gb_control_interface_hibernate_abort(intf->control);
-
-       return ret;
-}
-
-static int gb_interface_resume(struct device *dev)
-{
-       struct gb_interface *intf = to_gb_interface(dev);
-       struct gb_svc *svc = intf->hd->svc;
-       int ret;
-
-       ret = gb_interface_refclk_set(intf, true);
-       if (ret)
-               return ret;
-
-       ret = gb_svc_intf_resume(svc, intf->interface_id);
-       if (ret)
-               return ret;
-
-       ret = gb_control_resume(intf->control);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int gb_interface_runtime_idle(struct device *dev)
-{
-       pm_runtime_mark_last_busy(dev);
-       pm_request_autosuspend(dev);
-
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops gb_interface_pm_ops = {
-       SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
-                          gb_interface_runtime_idle)
-};
-
-struct device_type greybus_interface_type = {
-       .name =         "greybus_interface",
-       .release =      gb_interface_release,
-       .pm =           &gb_interface_pm_ops,
-};
-
-/*
- * A Greybus module represents a user-replaceable component on a GMP
- * phone.  An interface is the physical connection on that module.  A
- * module may have more than one interface.
- *
- * Create a gb_interface structure to represent a discovered interface.
- * The position of the interface within the Endo is encoded in the
- * "interface_id" argument.
- *
- * Returns a pointer to the new interface or a null pointer if a
- * failure occurs due to memory exhaustion.
- */
-struct gb_interface *gb_interface_create(struct gb_module *module,
-                                        u8 interface_id)
-{
-       struct gb_host_device *hd = module->hd;
-       struct gb_interface *intf;
-
-       intf = kzalloc(sizeof(*intf), GFP_KERNEL);
-       if (!intf)
-               return NULL;
-
-       intf->hd = hd;          /* XXX refcount? */
-       intf->module = module;
-       intf->interface_id = interface_id;
-       INIT_LIST_HEAD(&intf->bundles);
-       INIT_LIST_HEAD(&intf->manifest_descs);
-       mutex_init(&intf->mutex);
-       INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
-       init_completion(&intf->mode_switch_completion);
-
-       /* Invalid device id to start with */
-       intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
-
-       intf->dev.parent = &module->dev;
-       intf->dev.bus = &greybus_bus_type;
-       intf->dev.type = &greybus_interface_type;
-       intf->dev.groups = interface_groups;
-       intf->dev.dma_mask = module->dev.dma_mask;
-       device_initialize(&intf->dev);
-       dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
-                    interface_id);
-
-       pm_runtime_set_autosuspend_delay(&intf->dev,
-                                        GB_INTERFACE_AUTOSUSPEND_MS);
-
-       trace_gb_interface_create(intf);
-
-       return intf;
-}
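
A condensed sketch of how the module core drives an interface through the helpers defined below (error unwinding and the dummy-interface special cases in module.c are elided; the function name is hypothetical):

    static int example_bring_up_interface(struct gb_module *module, u8 id)
    {
            struct gb_interface *intf;
            int ret;

            intf = gb_interface_create(module, id);
            if (!intf)
                    return -ENOMEM;

            mutex_lock(&intf->mutex);
            ret = gb_interface_activate(intf);       /* power up, read DME ids */
            if (!ret)
                    ret = gb_interface_add(intf);    /* register with driver core */
            if (!ret)
                    ret = gb_interface_enable(intf); /* fetch manifest, add bundles */
            mutex_unlock(&intf->mutex);

            return ret;
    }

Removal mirrors this sequence: gb_interface_disable() and gb_interface_deactivate() under the mutex, then gb_interface_del() and gb_interface_put().
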
-
-static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
-{
-       struct gb_svc *svc = intf->hd->svc;
-       int ret;
-
-       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
-
-       ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
-       if (ret) {
-               dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
-{
-       struct gb_svc *svc = intf->hd->svc;
-       int ret;
-
-       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
-
-       ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
-       if (ret) {
-               dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
-{
-       struct gb_svc *svc = intf->hd->svc;
-       int ret;
-
-       dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
-
-       ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
-       if (ret) {
-               dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static int gb_interface_activate_operation(struct gb_interface *intf,
-                                          enum gb_interface_type *intf_type)
-{
-       struct gb_svc *svc = intf->hd->svc;
-       u8 type;
-       int ret;
-
-       dev_dbg(&intf->dev, "%s\n", __func__);
-
-       ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
-       if (ret) {
-               dev_err(&intf->dev, "failed to activate: %d\n", ret);
-               return ret;
-       }
-
-       switch (type) {
-       case GB_SVC_INTF_TYPE_DUMMY:
-               *intf_type = GB_INTERFACE_TYPE_DUMMY;
-               /* FIXME: handle as an error for now */
-               return -ENODEV;
-       case GB_SVC_INTF_TYPE_UNIPRO:
-               *intf_type = GB_INTERFACE_TYPE_UNIPRO;
-               dev_err(&intf->dev, "interface type UniPro not supported\n");
-               /* FIXME: handle as an error for now */
-               return -ENODEV;
-       case GB_SVC_INTF_TYPE_GREYBUS:
-               *intf_type = GB_INTERFACE_TYPE_GREYBUS;
-               break;
-       default:
-               dev_err(&intf->dev, "unknown interface type: %u\n", type);
-               *intf_type = GB_INTERFACE_TYPE_UNKNOWN;
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
-static int gb_interface_hibernate_link(struct gb_interface *intf)
-{
-       struct gb_svc *svc = intf->hd->svc;
-
-       return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
-}
-
-static int _gb_interface_activate(struct gb_interface *intf,
-                                 enum gb_interface_type *type)
-{
-       int ret;
-
-       *type = GB_INTERFACE_TYPE_UNKNOWN;
-
-       if (intf->ejected || intf->removed)
-               return -ENODEV;
-
-       ret = gb_interface_vsys_set(intf, true);
-       if (ret)
-               return ret;
-
-       ret = gb_interface_refclk_set(intf, true);
-       if (ret)
-               goto err_vsys_disable;
-
-       ret = gb_interface_unipro_set(intf, true);
-       if (ret)
-               goto err_refclk_disable;
-
-       ret = gb_interface_activate_operation(intf, type);
-       if (ret) {
-               switch (*type) {
-               case GB_INTERFACE_TYPE_UNIPRO:
-               case GB_INTERFACE_TYPE_GREYBUS:
-                       goto err_hibernate_link;
-               default:
-                       goto err_unipro_disable;
-               }
-       }
-
-       ret = gb_interface_read_dme(intf);
-       if (ret)
-               goto err_hibernate_link;
-
-       ret = gb_interface_route_create(intf);
-       if (ret)
-               goto err_hibernate_link;
-
-       intf->active = true;
-
-       trace_gb_interface_activate(intf);
-
-       return 0;
-
-err_hibernate_link:
-       gb_interface_hibernate_link(intf);
-err_unipro_disable:
-       gb_interface_unipro_set(intf, false);
-err_refclk_disable:
-       gb_interface_refclk_set(intf, false);
-err_vsys_disable:
-       gb_interface_vsys_set(intf, false);
-
-       return ret;
-}
-
-/*
- * At present, we assume a UniPro-only module to be a Greybus module that
- * failed to send its mailbox poke. There is some reason to believe that this
- * is because of a bug in the ES3 bootrom.
- *
- * FIXME: Check if this is a Toshiba bridge before retrying?
- */
-static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
-                                          enum gb_interface_type *type)
-{
-       int retries = 3;
-       int ret;
-
-       while (retries--) {
-               ret = _gb_interface_activate(intf, type);
-               if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
-                       continue;
-
-               break;
-       }
-
-       return ret;
-}
-
-/*
- * Activate an interface.
- *
- * Locking: Caller holds the interface mutex.
- */
-int gb_interface_activate(struct gb_interface *intf)
-{
-       enum gb_interface_type type;
-       int ret;
-
-       switch (intf->type) {
-       case GB_INTERFACE_TYPE_INVALID:
-       case GB_INTERFACE_TYPE_GREYBUS:
-               ret = _gb_interface_activate_es3_hack(intf, &type);
-               break;
-       default:
-               ret = _gb_interface_activate(intf, &type);
-       }
-
-       /* Make sure type is detected correctly during reactivation. */
-       if (intf->type != GB_INTERFACE_TYPE_INVALID) {
-               if (type != intf->type) {
-                       dev_err(&intf->dev, "failed to detect interface type\n");
-
-                       if (!ret)
-                               gb_interface_deactivate(intf);
-
-                       return -EIO;
-               }
-       } else {
-               intf->type = type;
-       }
-
-       return ret;
-}
-
-/*
- * Deactivate an interface.
- *
- * Locking: Caller holds the interface mutex.
- */
-void gb_interface_deactivate(struct gb_interface *intf)
-{
-       if (!intf->active)
-               return;
-
-       trace_gb_interface_deactivate(intf);
-
-       /* Abort any ongoing mode switch. */
-       if (intf->mode_switch)
-               complete(&intf->mode_switch_completion);
-
-       gb_interface_route_destroy(intf);
-       gb_interface_hibernate_link(intf);
-       gb_interface_unipro_set(intf, false);
-       gb_interface_refclk_set(intf, false);
-       gb_interface_vsys_set(intf, false);
-
-       intf->active = false;
-}
-
-/*
- * Enable an interface by enabling its control connection, fetching the
- * manifest and other information over it, and finally registering its child
- * devices.
- *
- * Locking: Caller holds the interface mutex.
- */
-int gb_interface_enable(struct gb_interface *intf)
-{
-       struct gb_control *control;
-       struct gb_bundle *bundle, *tmp;
-       int ret, size;
-       void *manifest;
-
-       ret = gb_interface_read_and_clear_init_status(intf);
-       if (ret) {
-               dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
-               return ret;
-       }
-
-       /* Establish control connection */
-       control = gb_control_create(intf);
-       if (IS_ERR(control)) {
-               dev_err(&intf->dev, "failed to create control device: %ld\n",
-                       PTR_ERR(control));
-               return PTR_ERR(control);
-       }
-       intf->control = control;
-
-       ret = gb_control_enable(intf->control);
-       if (ret)
-               goto err_put_control;
-
-       /* Get manifest size using control protocol on CPort */
-       size = gb_control_get_manifest_size_operation(intf);
-       if (size <= 0) {
-               dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
-
-               if (size)
-                       ret = size;
-               else
-                       ret = -EINVAL;
-
-               goto err_disable_control;
-       }
-
-       manifest = kmalloc(size, GFP_KERNEL);
-       if (!manifest) {
-               ret = -ENOMEM;
-               goto err_disable_control;
-       }
-
-       /* Get manifest using control protocol on CPort */
-       ret = gb_control_get_manifest_operation(intf, manifest, size);
-       if (ret) {
-               dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
-               goto err_free_manifest;
-       }
-
-       /*
-        * Parse the manifest and build up our data structures representing
-        * what's in it.
-        */
-       if (!gb_manifest_parse(intf, manifest, size)) {
-               dev_err(&intf->dev, "failed to parse manifest\n");
-               ret = -EINVAL;
-               goto err_destroy_bundles;
-       }
-
-       ret = gb_control_get_bundle_versions(intf->control);
-       if (ret)
-               goto err_destroy_bundles;
-
-       /* Register the control device and any bundles */
-       ret = gb_control_add(intf->control);
-       if (ret)
-               goto err_destroy_bundles;
-
-       pm_runtime_use_autosuspend(&intf->dev);
-       pm_runtime_get_noresume(&intf->dev);
-       pm_runtime_set_active(&intf->dev);
-       pm_runtime_enable(&intf->dev);
-
-       list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
-               ret = gb_bundle_add(bundle);
-               if (ret) {
-                       gb_bundle_destroy(bundle);
-                       continue;
-               }
-       }
-
-       kfree(manifest);
-
-       intf->enabled = true;
-
-       pm_runtime_put(&intf->dev);
-
-       trace_gb_interface_enable(intf);
-
-       return 0;
-
-err_destroy_bundles:
-       list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
-               gb_bundle_destroy(bundle);
-err_free_manifest:
-       kfree(manifest);
-err_disable_control:
-       gb_control_disable(intf->control);
-err_put_control:
-       gb_control_put(intf->control);
-       intf->control = NULL;
-
-       return ret;
-}
-
-/*
- * Disable an interface and destroy its bundles.
- *
- * Locking: Caller holds the interface mutex.
- */
-void gb_interface_disable(struct gb_interface *intf)
-{
-       struct gb_bundle *bundle;
-       struct gb_bundle *next;
-
-       if (!intf->enabled)
-               return;
-
-       trace_gb_interface_disable(intf);
-
-       pm_runtime_get_sync(&intf->dev);
-
-       /* Set disconnected flag to avoid I/O during connection tear down. */
-       if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
-               intf->disconnected = true;
-
-       list_for_each_entry_safe(bundle, next, &intf->bundles, links)
-               gb_bundle_destroy(bundle);
-
-       if (!intf->mode_switch && !intf->disconnected)
-               gb_control_interface_deactivate_prepare(intf->control);
-
-       gb_control_del(intf->control);
-       gb_control_disable(intf->control);
-       gb_control_put(intf->control);
-       intf->control = NULL;
-
-       intf->enabled = false;
-
-       pm_runtime_disable(&intf->dev);
-       pm_runtime_set_suspended(&intf->dev);
-       pm_runtime_dont_use_autosuspend(&intf->dev);
-       pm_runtime_put_noidle(&intf->dev);
-}
-
-/* Register an interface. */
-int gb_interface_add(struct gb_interface *intf)
-{
-       int ret;
-
-       ret = device_add(&intf->dev);
-       if (ret) {
-               dev_err(&intf->dev, "failed to register interface: %d\n", ret);
-               return ret;
-       }
-
-       trace_gb_interface_add(intf);
-
-       dev_info(&intf->dev, "Interface added (%s)\n",
-                gb_interface_type_string(intf));
-
-       switch (intf->type) {
-       case GB_INTERFACE_TYPE_GREYBUS:
-               dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
-                        intf->vendor_id, intf->product_id);
-               /* fall-through */
-       case GB_INTERFACE_TYPE_UNIPRO:
-               dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
-                        intf->ddbl1_manufacturer_id,
-                        intf->ddbl1_product_id);
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-/* Deregister an interface. */
-void gb_interface_del(struct gb_interface *intf)
-{
-       if (device_is_registered(&intf->dev)) {
-               trace_gb_interface_del(intf);
-
-               device_del(&intf->dev);
-               dev_info(&intf->dev, "Interface removed\n");
-       }
-}
-
-void gb_interface_put(struct gb_interface *intf)
-{
-       put_device(&intf->dev);
-}
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
deleted file mode 100644 (file)
index dd70406..0000000
+++ /dev/null
@@ -1,533 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus manifest parsing
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#include <linux/greybus.h>
-
-static const char *get_descriptor_type_string(u8 type)
-{
-       switch (type) {
-       case GREYBUS_TYPE_INVALID:
-               return "invalid";
-       case GREYBUS_TYPE_STRING:
-               return "string";
-       case GREYBUS_TYPE_INTERFACE:
-               return "interface";
-       case GREYBUS_TYPE_CPORT:
-               return "cport";
-       case GREYBUS_TYPE_BUNDLE:
-               return "bundle";
-       default:
-               WARN_ON(1);
-               return "unknown";
-       }
-}
-
-/*
- * We scan the manifest once to identify where all the descriptors
- * are.  The result is a list of these manifest_desc structures.  We
- * then pick through them for what we're looking for (starting with
- * the interface descriptor).  As each is processed we remove it from
- * the list.  When we're done the list should (probably) be empty.
- */
-struct manifest_desc {
-       struct list_head                links;
-
-       size_t                          size;
-       void                            *data;
-       enum greybus_descriptor_type    type;
-};
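
The single scan mentioned above lives in gb_manifest_parse() later in this file: it walks the descriptor area once, calling identify_descriptor() (below) for each entry and advancing by the number of bytes that entry consumed. A condensed sketch of that loop:

    while (size) {
            int desc_size = identify_descriptor(intf, desc, size);

            if (desc_size < 0)
                    return false;   /* invalid descriptor: reject the manifest */

            desc = (struct greybus_descriptor *)((char *)desc + desc_size);
            size -= desc_size;
    }
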
-
-static void release_manifest_descriptor(struct manifest_desc *descriptor)
-{
-       list_del(&descriptor->links);
-       kfree(descriptor);
-}
-
-static void release_manifest_descriptors(struct gb_interface *intf)
-{
-       struct manifest_desc *descriptor;
-       struct manifest_desc *next;
-
-       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
-               release_manifest_descriptor(descriptor);
-}
-
-static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
-{
-       struct manifest_desc *desc, *tmp;
-       struct greybus_descriptor_cport *desc_cport;
-
-       list_for_each_entry_safe(desc, tmp, head, links) {
-               desc_cport = desc->data;
-
-               if (desc->type != GREYBUS_TYPE_CPORT)
-                       continue;
-
-               if (desc_cport->bundle == bundle_id)
-                       release_manifest_descriptor(desc);
-       }
-}
-
-static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
-{
-       struct manifest_desc *descriptor;
-       struct manifest_desc *next;
-
-       list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
-               if (descriptor->type == GREYBUS_TYPE_BUNDLE)
-                       return descriptor;
-
-       return NULL;
-}
-
-/*
- * Validate the given descriptor.  Its reported size must fit within
- * the number of bytes remaining, and it must have a recognized
- * type.  Check that the reported size is at least as big as what
- * we expect to see.  (It could be bigger, perhaps for a new version
- * of the format.)
- *
- * Returns the (non-zero) number of bytes consumed by the descriptor,
- * or a negative errno.
- */
-static int identify_descriptor(struct gb_interface *intf,
-                              struct greybus_descriptor *desc, size_t size)
-{
-       struct greybus_descriptor_header *desc_header = &desc->header;
-       struct manifest_desc *descriptor;
-       size_t desc_size;
-       size_t expected_size;
-
-       if (size < sizeof(*desc_header)) {
-               dev_err(&intf->dev, "manifest too small (%zu < %zu)\n", size,
-                       sizeof(*desc_header));
-               return -EINVAL;         /* Must at least have header */
-       }
-
-       desc_size = le16_to_cpu(desc_header->size);
-       if (desc_size > size) {
-               dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
-                       desc_size, size);
-               return -EINVAL;
-       }
-
-       /* Descriptor needs to at least have a header */
-       expected_size = sizeof(*desc_header);
-
-       switch (desc_header->type) {
-       case GREYBUS_TYPE_STRING:
-               expected_size += sizeof(struct greybus_descriptor_string);
-               expected_size += desc->string.length;
-
-               /* String descriptors are padded to 4 byte boundaries */
-               expected_size = ALIGN(expected_size, 4);
-               break;
-       case GREYBUS_TYPE_INTERFACE:
-               expected_size += sizeof(struct greybus_descriptor_interface);
-               break;
-       case GREYBUS_TYPE_BUNDLE:
-               expected_size += sizeof(struct greybus_descriptor_bundle);
-               break;
-       case GREYBUS_TYPE_CPORT:
-               expected_size += sizeof(struct greybus_descriptor_cport);
-               break;
-       case GREYBUS_TYPE_INVALID:
-       default:
-               dev_err(&intf->dev, "invalid descriptor type (%u)\n",
-                       desc_header->type);
-               return -EINVAL;
-       }
-
-       if (desc_size < expected_size) {
-               dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
-                       get_descriptor_type_string(desc_header->type),
-                       desc_size, expected_size);
-               return -EINVAL;
-       }
-
-       /* Descriptor bigger than what we expect */
-       if (desc_size > expected_size) {
-               dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
-                        get_descriptor_type_string(desc_header->type),
-                        expected_size, desc_size);
-       }
-
-       descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
-       if (!descriptor)
-               return -ENOMEM;
-
-       descriptor->size = desc_size;
-       descriptor->data = (char *)desc + sizeof(*desc_header);
-       descriptor->type = desc_header->type;
-       list_add_tail(&descriptor->links, &intf->manifest_descs);
-
-       /* desc_size is positive and is known to fit in a signed int */
-
-       return desc_size;
-}
-
-/*
- * Find the string descriptor having the given id, validate it, and
- * allocate a duplicate copy of it.  The duplicate has an extra byte
- * which guarantees the returned string is NUL-terminated.
- *
- * String index 0 is valid (it represents "no string"), and for
- * that a null pointer is returned.
- *
- * Otherwise returns a pointer to a newly-allocated copy of the
- * descriptor string, or an error-coded pointer on failure.
- */
-static char *gb_string_get(struct gb_interface *intf, u8 string_id)
-{
-       struct greybus_descriptor_string *desc_string;
-       struct manifest_desc *descriptor;
-       bool found = false;
-       char *string;
-
-       /* A zero string id means no string (but no error) */
-       if (!string_id)
-               return NULL;
-
-       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
-               if (descriptor->type != GREYBUS_TYPE_STRING)
-                       continue;
-
-               desc_string = descriptor->data;
-               if (desc_string->id == string_id) {
-                       found = true;
-                       break;
-               }
-       }
-       if (!found)
-               return ERR_PTR(-ENOENT);
-
-       /* Allocate an extra byte so we can guarantee it's NUL-terminated */
-       string = kmemdup(&desc_string->string, desc_string->length + 1,
-                        GFP_KERNEL);
-       if (!string)
-               return ERR_PTR(-ENOMEM);
-       string[desc_string->length] = '\0';
-
-       /* Ok we've used this string, so we're done with it */
-       release_manifest_descriptor(descriptor);
-
-       return string;
-}
-
-/*
- * Find cport descriptors in the manifest associated with the given
- * bundle, and set up data structures for the functions that use
- * them.  Returns the number of cports set up for the bundle, or 0
- * if there is an error.
- */
-static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
-{
-       struct gb_interface *intf = bundle->intf;
-       struct greybus_descriptor_cport *desc_cport;
-       struct manifest_desc *desc, *next, *tmp;
-       LIST_HEAD(list);
-       u8 bundle_id = bundle->id;
-       u16 cport_id;
-       u32 count = 0;
-       int i;
-
-       /* Set up all cport descriptors associated with this bundle */
-       list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
-               if (desc->type != GREYBUS_TYPE_CPORT)
-                       continue;
-
-               desc_cport = desc->data;
-               if (desc_cport->bundle != bundle_id)
-                       continue;
-
-               cport_id = le16_to_cpu(desc_cport->id);
-               if (cport_id > CPORT_ID_MAX)
-                       goto exit;
-
-               /* No other cport is allowed to use the control cport's id */
-               if (cport_id == GB_CONTROL_CPORT_ID) {
-                       dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
-                               cport_id);
-                       goto exit;
-               }
-
-               /*
-                * Found one, move it to our temporary list after checking for
-                * duplicates.
-                */
-               list_for_each_entry(tmp, &list, links) {
-                       desc_cport = tmp->data;
-                       if (cport_id == le16_to_cpu(desc_cport->id)) {
-                               dev_err(&bundle->dev,
-                                       "duplicate CPort %u found\n", cport_id);
-                               goto exit;
-                       }
-               }
-               list_move_tail(&desc->links, &list);
-               count++;
-       }
-
-       if (!count)
-               return 0;
-
-       bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
-                                    GFP_KERNEL);
-       if (!bundle->cport_desc)
-               goto exit;
-
-       bundle->num_cports = count;
-
-       i = 0;
-       list_for_each_entry_safe(desc, next, &list, links) {
-               desc_cport = desc->data;
-               memcpy(&bundle->cport_desc[i++], desc_cport,
-                      sizeof(*desc_cport));
-
-               /* Release the cport descriptor */
-               release_manifest_descriptor(desc);
-       }
-
-       return count;
-exit:
-       release_cport_descriptors(&list, bundle_id);
-       /*
-        * Free all cports for this bundle to avoid 'excess descriptors'
-        * warnings.
-        */
-       release_cport_descriptors(&intf->manifest_descs, bundle_id);
-
-       return 0;       /* Error; count should also be 0 */
-}
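
As a sketch of how the descriptors copied above are consumed later, assuming the greybus_descriptor_cport layout from greybus_manifest.h (which also carries a protocol_id byte), a hypothetical bundle-driver helper might look like:

static u16 example_find_cport(struct gb_bundle *bundle, u8 protocol_id)
{
        size_t i;

        /* bundle->cport_desc[] was filled in by gb_manifest_parse_cports() */
        for (i = 0; i < bundle->num_cports; i++) {
                if (bundle->cport_desc[i].protocol_id == protocol_id)
                        return le16_to_cpu(bundle->cport_desc[i].id);
        }

        return 0;       /* 0 is the control cport id, never a valid match */
}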
-
-/*
- * Find bundle descriptors in the manifest and set up their data
- * structures.  Returns the number of bundles set up for the
- * given interface.
- */
-static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
-{
-       struct manifest_desc *desc;
-       struct gb_bundle *bundle;
-       struct gb_bundle *bundle_next;
-       u32 count = 0;
-       u8 bundle_id;
-       u8 class;
-
-       while ((desc = get_next_bundle_desc(intf))) {
-               struct greybus_descriptor_bundle *desc_bundle;
-
-               /* Found one.  Set up its bundle structure */
-               desc_bundle = desc->data;
-               bundle_id = desc_bundle->id;
-               class = desc_bundle->class;
-
-               /* Done with this bundle descriptor */
-               release_manifest_descriptor(desc);
-
-               /* Ignore any legacy control bundles */
-               if (bundle_id == GB_CONTROL_BUNDLE_ID) {
-                       dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
-                               __func__);
-                       release_cport_descriptors(&intf->manifest_descs,
-                                                 bundle_id);
-                       continue;
-               }
-
-               /* No bundle other than control may use the control class */
-               if (class == GREYBUS_CLASS_CONTROL) {
-                       dev_err(&intf->dev,
-                               "bundle %u cannot use control class\n",
-                               bundle_id);
-                       goto cleanup;
-               }
-
-               bundle = gb_bundle_create(intf, bundle_id, class);
-               if (!bundle)
-                       goto cleanup;
-
-               /*
-                * Now set up this bundle's functions and cports.
-                *
-                * A 'bundle' represents a device in greybus. It may require
-                * multiple cports to function. If we fail to set up any cport
-                * of a bundle, we must reject the whole bundle, as the device
-                * may then be unable to function properly.
-                *
-                * However, failing to set up a cport of bundle X doesn't mean
-                * that the device corresponding to bundle Y will stop working
-                * properly. Bundles should be treated as separate, independent
-                * devices.
-                *
-                * So while parsing the manifest for an interface, treat
-                * bundles as separate entities, and don't reject the entire
-                * interface and its bundles just because one cport failed to
-                * initialize. But do make sure the bundle which needed that
-                * cport gets destroyed properly.
-                */
-               if (!gb_manifest_parse_cports(bundle)) {
-                       gb_bundle_destroy(bundle);
-                       continue;
-               }
-
-               count++;
-       }
-
-       return count;
-cleanup:
-       /* An error occurred; undo any changes we've made */
-       list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
-               gb_bundle_destroy(bundle);
-               count--;
-       }
-       return 0;       /* Error; count should also be 0 */
-}
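
For reference, a sketch of the per-bundle descriptor the loop above consumes; the field layout is assumed to follow greybus_descriptor_bundle in greybus_manifest.h:

struct example_bundle_descriptor {
        __u8    id;             /* desc_bundle->id; 0 is reserved for control */
        __u8    class;          /* desc_bundle->class; control class rejected */
        __u8    pad[2];         /* assumed padding, must be zero */
};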
-
-static bool gb_manifest_parse_interface(struct gb_interface *intf,
-                                       struct manifest_desc *interface_desc)
-{
-       struct greybus_descriptor_interface *desc_intf = interface_desc->data;
-       struct gb_control *control = intf->control;
-       char *str;
-
-       /* Handle the strings first--they can fail */
-       str = gb_string_get(intf, desc_intf->vendor_stringid);
-       if (IS_ERR(str))
-               return false;
-       control->vendor_string = str;
-
-       str = gb_string_get(intf, desc_intf->product_stringid);
-       if (IS_ERR(str))
-               goto out_free_vendor_string;
-       control->product_string = str;
-
-       /* Assign feature flags communicated via manifest */
-       intf->features = desc_intf->features;
-
-       /* Release the interface descriptor, now that we're done with it */
-       release_manifest_descriptor(interface_desc);
-
-       /* An interface must have at least one bundle descriptor */
-       if (!gb_manifest_parse_bundles(intf)) {
-               dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
-               goto out_err;
-       }
-
-       return true;
-out_err:
-       kfree(control->product_string);
-       control->product_string = NULL;
-out_free_vendor_string:
-       kfree(control->vendor_string);
-       control->vendor_string = NULL;
-
-       return false;
-}
-
-/*
- * Parse a buffer containing an interface manifest.
- *
- * If we find anything wrong with the content/format of the buffer
- * we reject it.
- *
- * The first requirement is that the manifest's version is
- * one we can parse.
- *
- * We make an initial pass through the buffer and identify all of
- * the descriptors it contains, keeping track, for each, of its
- * type and of the location and size of its data in the buffer.
- *
- * Next we scan the descriptors, looking for an interface descriptor;
- * there must be exactly one of those.  When found, we record the
- * information it contains, and then remove that descriptor (and any
- * string descriptors it refers to) from further consideration.
- *
- * After that we look for the interface's bundles--there must be at
- * least one of those.
- *
- * Returns true if parsing was successful, false otherwise.
- */
-bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
-{
-       struct greybus_manifest *manifest;
-       struct greybus_manifest_header *header;
-       struct greybus_descriptor *desc;
-       struct manifest_desc *descriptor;
-       struct manifest_desc *interface_desc = NULL;
-       u16 manifest_size;
-       u32 found = 0;
-       bool result;
-
-       /* Manifest descriptor list should be empty here */
-       if (WARN_ON(!list_empty(&intf->manifest_descs)))
-               return false;
-
-       /* we have to have at _least_ the manifest header */
-       if (size < sizeof(*header)) {
-               dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
-                       size, sizeof(*header));
-               return false;
-       }
-
-       /* Make sure the size is right */
-       manifest = data;
-       header = &manifest->header;
-       manifest_size = le16_to_cpu(header->size);
-       if (manifest_size != size) {
-               dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
-                       size, manifest_size);
-               return false;
-       }
-
-       /* Validate major/minor number */
-       if (header->version_major > GREYBUS_VERSION_MAJOR) {
-               dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
-                       header->version_major, header->version_minor,
-                       GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
-               return false;
-       }
-
-       /* OK, find all the descriptors */
-       desc = manifest->descriptors;
-       size -= sizeof(*header);
-       while (size) {
-               int desc_size;
-
-               desc_size = identify_descriptor(intf, desc, size);
-               if (desc_size < 0) {
-                       result = false;
-                       goto out;
-               }
-               desc = (struct greybus_descriptor *)((char *)desc + desc_size);
-               size -= desc_size;
-       }
-
-       /* There must be a single interface descriptor */
-       list_for_each_entry(descriptor, &intf->manifest_descs, links) {
-               if (descriptor->type == GREYBUS_TYPE_INTERFACE)
-                       if (!found++)
-                               interface_desc = descriptor;
-       }
-       if (found != 1) {
-               dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
-                       found);
-               result = false;
-               goto out;
-       }
-
-       /* Parse the manifest, starting with the interface descriptor */
-       result = gb_manifest_parse_interface(intf, interface_desc);
-
-       /*
-        * We really should have no remaining descriptors, but we
-        * don't know what newer format manifests might leave.
-        */
-       if (result && !list_empty(&intf->manifest_descs))
-               dev_info(&intf->dev, "excess descriptors in interface manifest\n");
-out:
-       release_manifest_descriptors(intf);
-
-       return result;
-}
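
A minimal sketch of the header checks performed above, assuming the greybus_manifest_header layout (a little-endian total size followed by the major/minor version pair); the helper name is hypothetical:

static bool example_manifest_header_ok(const void *data, size_t size)
{
        const struct greybus_manifest_header *header = data;

        if (size < sizeof(*header))
                return false;           /* short manifest */
        if (le16_to_cpu(header->size) != size)
                return false;           /* size mismatch */

        /* newer minor versions are fine; newer major versions are not */
        return header->version_major <= GREYBUS_VERSION_MAJOR;
}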
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
deleted file mode 100644 (file)
index 36f77f9..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus Module code
- *
- * Copyright 2016 Google Inc.
- * Copyright 2016 Linaro Ltd.
- */
-
-#include <linux/greybus.h>
-#include "greybus_trace.h"
-
-static ssize_t eject_store(struct device *dev,
-                          struct device_attribute *attr,
-                          const char *buf, size_t len)
-{
-       struct gb_module *module = to_gb_module(dev);
-       struct gb_interface *intf;
-       size_t i;
-       long val;
-       int ret;
-
-       ret = kstrtol(buf, 0, &val);
-       if (ret)
-               return ret;
-
-       if (!val)
-               return len;
-
-       for (i = 0; i < module->num_interfaces; ++i) {
-               intf = module->interfaces[i];
-
-               mutex_lock(&intf->mutex);
-               /* Set flag to prevent concurrent activation. */
-               intf->ejected = true;
-               gb_interface_disable(intf);
-               gb_interface_deactivate(intf);
-               mutex_unlock(&intf->mutex);
-       }
-
-       /* Tell the SVC to eject the primary interface. */
-       ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
-       if (ret)
-               return ret;
-
-       return len;
-}
-static DEVICE_ATTR_WO(eject);
-
-static ssize_t module_id_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct gb_module *module = to_gb_module(dev);
-
-       return sprintf(buf, "%u\n", module->module_id);
-}
-static DEVICE_ATTR_RO(module_id);
-
-static ssize_t num_interfaces_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       struct gb_module *module = to_gb_module(dev);
-
-       return sprintf(buf, "%zu\n", module->num_interfaces);
-}
-static DEVICE_ATTR_RO(num_interfaces);
-
-static struct attribute *module_attrs[] = {
-       &dev_attr_eject.attr,
-       &dev_attr_module_id.attr,
-       &dev_attr_num_interfaces.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(module);
-
-static void gb_module_release(struct device *dev)
-{
-       struct gb_module *module = to_gb_module(dev);
-
-       trace_gb_module_release(module);
-
-       kfree(module);
-}
-
-struct device_type greybus_module_type = {
-       .name           = "greybus_module",
-       .release        = gb_module_release,
-};
-
-struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
-                                  size_t num_interfaces)
-{
-       struct gb_interface *intf;
-       struct gb_module *module;
-       int i;
-
-       module = kzalloc(struct_size(module, interfaces, num_interfaces),
-                        GFP_KERNEL);
-       if (!module)
-               return NULL;
-
-       module->hd = hd;
-       module->module_id = module_id;
-       module->num_interfaces = num_interfaces;
-
-       module->dev.parent = &hd->dev;
-       module->dev.bus = &greybus_bus_type;
-       module->dev.type = &greybus_module_type;
-       module->dev.groups = module_groups;
-       module->dev.dma_mask = hd->dev.dma_mask;
-       device_initialize(&module->dev);
-       dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
-
-       trace_gb_module_create(module);
-
-       for (i = 0; i < num_interfaces; ++i) {
-               intf = gb_interface_create(module, module_id + i);
-               if (!intf) {
-                       dev_err(&module->dev, "failed to create interface %u\n",
-                               module_id + i);
-                       goto err_put_interfaces;
-               }
-               module->interfaces[i] = intf;
-       }
-
-       return module;
-
-err_put_interfaces:
-       for (--i; i >= 0; --i)
-               gb_interface_put(module->interfaces[i]);
-
-       put_device(&module->dev);
-
-       return NULL;
-}
-
-/*
- * Register and enable an interface after first attempting to activate it.
- */
-static void gb_module_register_interface(struct gb_interface *intf)
-{
-       struct gb_module *module = intf->module;
-       u8 intf_id = intf->interface_id;
-       int ret;
-
-       mutex_lock(&intf->mutex);
-
-       ret = gb_interface_activate(intf);
-       if (ret) {
-               if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
-                       dev_err(&module->dev,
-                               "failed to activate interface %u: %d\n",
-                               intf_id, ret);
-               }
-
-               gb_interface_add(intf);
-               goto err_unlock;
-       }
-
-       ret = gb_interface_add(intf);
-       if (ret)
-               goto err_interface_deactivate;
-
-       ret = gb_interface_enable(intf);
-       if (ret) {
-               dev_err(&module->dev, "failed to enable interface %u: %d\n",
-                       intf_id, ret);
-               goto err_interface_deactivate;
-       }
-
-       mutex_unlock(&intf->mutex);
-
-       return;
-
-err_interface_deactivate:
-       gb_interface_deactivate(intf);
-err_unlock:
-       mutex_unlock(&intf->mutex);
-}
-
-static void gb_module_deregister_interface(struct gb_interface *intf)
-{
-       /* Mark as disconnected to prevent I/O during disable. */
-       if (intf->module->disconnected)
-               intf->disconnected = true;
-
-       mutex_lock(&intf->mutex);
-       intf->removed = true;
-       gb_interface_disable(intf);
-       gb_interface_deactivate(intf);
-       mutex_unlock(&intf->mutex);
-
-       gb_interface_del(intf);
-}
-
-/* Register a module and its interfaces. */
-int gb_module_add(struct gb_module *module)
-{
-       size_t i;
-       int ret;
-
-       ret = device_add(&module->dev);
-       if (ret) {
-               dev_err(&module->dev, "failed to register module: %d\n", ret);
-               return ret;
-       }
-
-       trace_gb_module_add(module);
-
-       for (i = 0; i < module->num_interfaces; ++i)
-               gb_module_register_interface(module->interfaces[i]);
-
-       return 0;
-}
-
-/* Deregister a module and its interfaces. */
-void gb_module_del(struct gb_module *module)
-{
-       size_t i;
-
-       for (i = 0; i < module->num_interfaces; ++i)
-               gb_module_deregister_interface(module->interfaces[i]);
-
-       trace_gb_module_del(module);
-
-       device_del(&module->dev);
-}
-
-void gb_module_put(struct gb_module *module)
-{
-       size_t i;
-
-       for (i = 0; i < module->num_interfaces; ++i)
-               gb_interface_put(module->interfaces[i]);
-
-       put_device(&module->dev);
-}
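
Taken together, the functions above define the module lifecycle. A hypothetical host-driver sketch of the hotplug path (modeled on how the SVC code uses this API):

static int example_module_hotplug(struct gb_host_device *hd, u8 module_id)
{
        struct gb_module *module;
        int ret;

        module = gb_module_create(hd, module_id, 1);    /* one interface */
        if (!module)
                return -ENOMEM;

        ret = gb_module_add(module);
        if (ret) {
                gb_module_put(module);  /* drops interface and device refs */
                return ret;
        }

        /* on removal: gb_module_del(module); gb_module_put(module); */
        return 0;
}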
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
deleted file mode 100644 (file)
index 8459e9b..0000000
+++ /dev/null
@@ -1,1264 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Greybus operations
- *
- * Copyright 2014-2015 Google Inc.
- * Copyright 2014-2015 Linaro Ltd.
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-#include <linux/greybus.h>
-
-#include "greybus_trace.h"
-
-static struct kmem_cache *gb_operation_cache;
-static struct kmem_cache *gb_message_cache;
-
-/* Workqueue to handle Greybus operation completions. */
-static struct workqueue_struct *gb_operation_completion_wq;
-
-/* Wait queue for synchronous cancellations. */
-static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
-
-/*
- * Protects updates to operation->errno.
- */
-static DEFINE_SPINLOCK(gb_operations_lock);
-
-static int gb_operation_response_send(struct gb_operation *operation,
-                                     int errno);
-
-/*
- * Increment operation active count and add to connection list unless the
- * connection is going away.
- *
- * Caller holds operation reference.
- */
-static int gb_operation_get_active(struct gb_operation *operation)
-{
-       struct gb_connection *connection = operation->connection;
-       unsigned long flags;
-
-       spin_lock_irqsave(&connection->lock, flags);
-       switch (connection->state) {
-       case GB_CONNECTION_STATE_ENABLED:
-               break;
-       case GB_CONNECTION_STATE_ENABLED_TX:
-               if (gb_operation_is_incoming(operation))
-                       goto err_unlock;
-               break;
-       case GB_CONNECTION_STATE_DISCONNECTING:
-               if (!gb_operation_is_core(operation))
-                       goto err_unlock;
-               break;
-       default:
-               goto err_unlock;
-       }
-
-       if (operation->active++ == 0)
-               list_add_tail(&operation->links, &connection->operations);
-
-       trace_gb_operation_get_active(operation);
-
-       spin_unlock_irqrestore(&connection->lock, flags);
-
-       return 0;
-
-err_unlock:
-       spin_unlock_irqrestore(&connection->lock, flags);
-
-       return -ENOTCONN;
-}
-
-/* Caller holds operation reference. */
-static void gb_operation_put_active(struct gb_operation *operation)
-{
-       struct gb_connection *connection = operation->connection;
-       unsigned long flags;
-
-       spin_lock_irqsave(&connection->lock, flags);
-
-       trace_gb_operation_put_active(operation);
-
-       if (--operation->active == 0) {
-               list_del(&operation->links);
-               if (atomic_read(&operation->waiters))
-                       wake_up(&gb_operation_cancellation_queue);
-       }
-       spin_unlock_irqrestore(&connection->lock, flags);
-}
-
-static bool gb_operation_is_active(struct gb_operation *operation)
-{
-       struct gb_connection *connection = operation->connection;
-       unsigned long flags;
-       bool ret;
-
-       spin_lock_irqsave(&connection->lock, flags);
-       ret = operation->active;
-       spin_unlock_irqrestore(&connection->lock, flags);
-
-       return ret;
-}
-
-/*
- * Set an operation's result.
- *
- * Initially an outgoing operation's errno value is -EBADR.
- * If no error occurs before sending the request message, the only
- * valid value operation->errno can be set to is -EINPROGRESS,
- * indicating the request has been (or rather is about to be) sent.
- * At that point nobody should be looking at the result until the
- * response arrives.
- *
- * The first time the result gets set after the request has been
- * sent, that result "sticks."  That is, if two concurrent threads
- * race to set the result, the first one wins.  The return value
- * tells the caller whether its result was recorded; if not, the
- * caller has nothing more to do.
- *
- * The result value -EILSEQ is reserved to signal an implementation
- * error; if it's ever observed, the code performing the request has
- * done something fundamentally wrong.  It is an error to try to set
- * the result to -EBADR; attempts to do so trigger a warning, and
- * -EILSEQ is used instead.  Similarly, the only valid result value
- * to set for an operation in its initial state is -EINPROGRESS.
- * Attempts to set anything else will also record a (successful)
- * -EILSEQ operation result.
- */
-static bool gb_operation_result_set(struct gb_operation *operation, int result)
-{
-       unsigned long flags;
-       int prev;
-
-       if (result == -EINPROGRESS) {
-               /*
-                * -EINPROGRESS is used to indicate the request is
-                * in flight.  It should be the first result value
-                * set after the initial -EBADR.  Issue a warning
-                * and record an implementation error if it's
-                * set at any other time.
-                */
-               spin_lock_irqsave(&gb_operations_lock, flags);
-               prev = operation->errno;
-               if (prev == -EBADR)
-                       operation->errno = result;
-               else
-                       operation->errno = -EILSEQ;
-               spin_unlock_irqrestore(&gb_operations_lock, flags);
-               WARN_ON(prev != -EBADR);
-
-               return true;
-       }
-
-       /*
-        * The first result value set after a request has been sent
-        * will be the final result of the operation.  Subsequent
-        * attempts to set the result are ignored.
-        *
-        * Note that -EBADR is a reserved "initial state" result
-        * value.  Attempts to set this value result in a warning,
-        * and the result code is set to -EILSEQ instead.
-        */
-       if (WARN_ON(result == -EBADR))
-               result = -EILSEQ; /* Nobody should be setting -EBADR */
-
-       spin_lock_irqsave(&gb_operations_lock, flags);
-       prev = operation->errno;
-       if (prev == -EINPROGRESS)
-               operation->errno = result;      /* First and final result */
-       spin_unlock_irqrestore(&gb_operations_lock, flags);
-
-       return prev == -EINPROGRESS;
-}
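
To make the state machine concrete, a sketch of the intended transition sequence (illustration only; in the driver these calls are spread across the send and receive paths):

static void example_result_lifecycle(struct gb_operation *operation)
{
        /* operation->errno starts out as -EBADR ("never set") */
        WARN_ON(!gb_operation_result_set(operation, -EINPROGRESS)); /* sent */
        WARN_ON(!gb_operation_result_set(operation, 0));     /* final result */
        WARN_ON(gb_operation_result_set(operation, -ETIMEDOUT)); /* too late */
}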
-
-int gb_operation_result(struct gb_operation *operation)
-{
-       int result = operation->errno;
-
-       WARN_ON(result == -EBADR);
-       WARN_ON(result == -EINPROGRESS);
-
-       return result;
-}
-EXPORT_SYMBOL_GPL(gb_operation_result);
-
-/*
- * Looks up an outgoing operation on a connection and returns a refcounted
- * pointer if found, or NULL otherwise.
- */
-static struct gb_operation *
-gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
-{
-       struct gb_operation *operation;
-       unsigned long flags;
-       bool found = false;
-
-       spin_lock_irqsave(&connection->lock, flags);
-       list_for_each_entry(operation, &connection->operations, links)
-               if (operation->id == operation_id &&
-                   !gb_operation_is_incoming(operation)) {
-                       gb_operation_get(operation);
-                       found = true;
-                       break;
-               }
-       spin_unlock_irqrestore(&connection->lock, flags);
-
-       return found ? operation : NULL;
-}
-
-static int gb_message_send(struct gb_message *message, gfp_t gfp)
-{
-       struct gb_connection *connection = message->operation->connection;
-
-       trace_gb_message_send(message);
-       return connection->hd->driver->message_send(connection->hd,
-                                       connection->hd_cport_id,
-                                       message,
-                                       gfp);
-}
-
-/*
- * Cancel a message we have passed to the host device layer to be sent.
- */
-static void gb_message_cancel(struct gb_message *message)
-{
-       struct gb_host_device *hd = message->operation->connection->hd;
-
-       hd->driver->message_cancel(message);
-}
-
-static void gb_operation_request_handle(struct gb_operation *operation)
-{
-       struct gb_connection *connection = operation->connection;
-       int status;
-       int ret;
-
-       if (connection->handler) {
-               status = connection->handler(operation);
-       } else {
-               dev_err(&connection->hd->dev,
-                       "%s: unexpected incoming request of type 0x%02x\n",
-                       connection->name, operation->type);
-
-               status = -EPROTONOSUPPORT;
-       }
-
-       ret = gb_operation_response_send(operation, status);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: failed to send response %d for type 0x%02x: %d\n",
-                       connection->name, status, operation->type, ret);
-               return;
-       }
-}
-
-/*
- * Process operation work.
- *
- * For incoming requests, call the protocol request handler. The operation
- * result should be -EINPROGRESS at this point.
- *
- * For outgoing requests, the operation result value should have
- * been set before queueing this.  The operation callback function
- * allows the original requester to know the request has completed
- * and its result is available.
- */
-static void gb_operation_work(struct work_struct *work)
-{
-       struct gb_operation *operation;
-       int ret;
-
-       operation = container_of(work, struct gb_operation, work);
-
-       if (gb_operation_is_incoming(operation)) {
-               gb_operation_request_handle(operation);
-       } else {
-               ret = del_timer_sync(&operation->timer);
-               if (!ret) {
-                       /* Cancel request message if scheduled by timeout. */
-                       if (gb_operation_result(operation) == -ETIMEDOUT)
-                               gb_message_cancel(operation->request);
-               }
-
-               operation->callback(operation);
-       }
-
-       gb_operation_put_active(operation);
-       gb_operation_put(operation);
-}
-
-static void gb_operation_timeout(struct timer_list *t)
-{
-       struct gb_operation *operation = from_timer(operation, t, timer);
-
-       if (gb_operation_result_set(operation, -ETIMEDOUT)) {
-               /*
-                * A stuck request message will be cancelled from the
-                * workqueue.
-                */
-               queue_work(gb_operation_completion_wq, &operation->work);
-       }
-}
-
-static void gb_operation_message_init(struct gb_host_device *hd,
-                                     struct gb_message *message,
-                                     u16 operation_id,
-                                     size_t payload_size, u8 type)
-{
-       struct gb_operation_msg_hdr *header;
-
-       header = message->buffer;
-
-       message->header = header;
-       message->payload = payload_size ? header + 1 : NULL;
-       message->payload_size = payload_size;
-
-       /*
-        * The type supplied for incoming message buffers will be
-        * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
-        * arriving data so there's no need to initialize the message header.
-        */
-       if (type != GB_REQUEST_TYPE_INVALID) {
-               u16 message_size = (u16)(sizeof(*header) + payload_size);
-
-               /*
-                * For a request, the operation id gets filled in
-                * when the message is sent.  For a response, it
-                * will be copied from the request by the caller.
-                *
-                * The result field in a request message must be
-                * zero.  It will be set just prior to sending for
-                * a response.
-                */
-               header->size = cpu_to_le16(message_size);
-               header->operation_id = 0;
-               header->type = type;
-               header->result = 0;
-       }
-}
-
-/*
- * Allocate a message to be used for an operation request or response.
- * Both types of message contain a common header.  The request message
- * for an outgoing operation is outbound, as is the response message
- * for an incoming operation.  The message header for an outbound
- * message is partially initialized here.
- *
- * The headers for inbound messages don't need to be initialized;
- * they'll be filled in by arriving data.
- *
- * Our message buffers have the following layout:
- *     message header  \_ these combined are
- *     message payload /  the message size
- */
-static struct gb_message *
-gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
-                          size_t payload_size, gfp_t gfp_flags)
-{
-       struct gb_message *message;
-       struct gb_operation_msg_hdr *header;
-       size_t message_size = payload_size + sizeof(*header);
-
-       if (message_size > hd->buffer_size_max) {
-               dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
-                        message_size, hd->buffer_size_max);
-               return NULL;
-       }
-
-       /* Allocate the message structure and buffer. */
-       message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
-       if (!message)
-               return NULL;
-
-       message->buffer = kzalloc(message_size, gfp_flags);
-       if (!message->buffer)
-               goto err_free_message;
-
-       /* Initialize the message.  Operation id is filled in later. */
-       gb_operation_message_init(hd, message, 0, payload_size, type);
-
-       return message;
-
-err_free_message:
-       kmem_cache_free(gb_message_cache, message);
-
-       return NULL;
-}
-
-static void gb_operation_message_free(struct gb_message *message)
-{
-       kfree(message->buffer);
-       kmem_cache_free(gb_message_cache, message);
-}
-
-/*
- * Map an enum gb_operation_status value (which is represented in a
- * message as a single byte) to an appropriate Linux negative errno.
- */
-static int gb_operation_status_map(u8 status)
-{
-       switch (status) {
-       case GB_OP_SUCCESS:
-               return 0;
-       case GB_OP_INTERRUPTED:
-               return -EINTR;
-       case GB_OP_TIMEOUT:
-               return -ETIMEDOUT;
-       case GB_OP_NO_MEMORY:
-               return -ENOMEM;
-       case GB_OP_PROTOCOL_BAD:
-               return -EPROTONOSUPPORT;
-       case GB_OP_OVERFLOW:
-               return -EMSGSIZE;
-       case GB_OP_INVALID:
-               return -EINVAL;
-       case GB_OP_RETRY:
-               return -EAGAIN;
-       case GB_OP_NONEXISTENT:
-               return -ENODEV;
-       case GB_OP_MALFUNCTION:
-               return -EILSEQ;
-       case GB_OP_UNKNOWN_ERROR:
-       default:
-               return -EIO;
-       }
-}
-
-/*
- * Map a Linux errno value (from operation->errno) into the value
- * that should represent it in a response message status sent
- * over the wire.  Returns an enum gb_operation_status value (which
- * is represented in a message as a single byte).
- */
-static u8 gb_operation_errno_map(int errno)
-{
-       switch (errno) {
-       case 0:
-               return GB_OP_SUCCESS;
-       case -EINTR:
-               return GB_OP_INTERRUPTED;
-       case -ETIMEDOUT:
-               return GB_OP_TIMEOUT;
-       case -ENOMEM:
-               return GB_OP_NO_MEMORY;
-       case -EPROTONOSUPPORT:
-               return GB_OP_PROTOCOL_BAD;
-       case -EMSGSIZE:
-               return GB_OP_OVERFLOW;  /* Could be underflow too */
-       case -EINVAL:
-               return GB_OP_INVALID;
-       case -EAGAIN:
-               return GB_OP_RETRY;
-       case -EILSEQ:
-               return GB_OP_MALFUNCTION;
-       case -ENODEV:
-               return GB_OP_NONEXISTENT;
-       case -EIO:
-       default:
-               return GB_OP_UNKNOWN_ERROR;
-       }
-}
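
The two maps above are intended to be inverses for every status named explicitly in gb_operation_status_map(); a quick sketch of that property:

static bool example_status_roundtrips(u8 status)
{
        /* holds for each case listed in gb_operation_status_map() */
        return gb_operation_errno_map(gb_operation_status_map(status)) == status;
}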
-
-bool gb_operation_response_alloc(struct gb_operation *operation,
-                                size_t response_size, gfp_t gfp)
-{
-       struct gb_host_device *hd = operation->connection->hd;
-       struct gb_operation_msg_hdr *request_header;
-       struct gb_message *response;
-       u8 type;
-
-       type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
-       response = gb_operation_message_alloc(hd, type, response_size, gfp);
-       if (!response)
-               return false;
-       response->operation = operation;
-
-       /*
-        * Size and type get initialized when the message is
-        * allocated.  The errno will be set before sending.  All
-        * that's left is the operation id, which we copy from the
-        * request message header (as-is, in little-endian order).
-        */
-       request_header = operation->request->header;
-       response->header->operation_id = request_header->operation_id;
-       operation->response = response;
-
-       return true;
-}
-EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
-
-/*
- * Create a Greybus operation to be sent over the given connection.
- * The request buffer will be big enough for a payload of the given
- * size.
- *
- * For outgoing requests, the request message's header will be
- * initialized with the type of the request and the message size.
- * Outgoing operations must also specify the response buffer size,
- * which must be sufficient to hold all expected response data.  The
- * response message header will eventually be overwritten, so there's
- * no need to initialize it here.
- *
- * Request messages for incoming operations can arrive in interrupt
- * context, so they must be allocated with GFP_ATOMIC.  In this case
- * the request buffer will be immediately overwritten, so there is
- * no need to initialize the message header.  Responsibility for
- * allocating a response buffer lies with the incoming request
- * handler for a protocol.  So we don't allocate that here.
- *
- * Returns a pointer to the new operation or a null pointer if an
- * error occurs.
- */
-static struct gb_operation *
-gb_operation_create_common(struct gb_connection *connection, u8 type,
-                          size_t request_size, size_t response_size,
-                          unsigned long op_flags, gfp_t gfp_flags)
-{
-       struct gb_host_device *hd = connection->hd;
-       struct gb_operation *operation;
-
-       operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
-       if (!operation)
-               return NULL;
-       operation->connection = connection;
-
-       operation->request = gb_operation_message_alloc(hd, type, request_size,
-                                                       gfp_flags);
-       if (!operation->request)
-               goto err_cache;
-       operation->request->operation = operation;
-
-       /* Allocate the response buffer for outgoing operations */
-       if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
-               if (!gb_operation_response_alloc(operation, response_size,
-                                                gfp_flags)) {
-                       goto err_request;
-               }
-
-               timer_setup(&operation->timer, gb_operation_timeout, 0);
-       }
-
-       operation->flags = op_flags;
-       operation->type = type;
-       operation->errno = -EBADR;  /* Initial value--means "never set" */
-
-       INIT_WORK(&operation->work, gb_operation_work);
-       init_completion(&operation->completion);
-       kref_init(&operation->kref);
-       atomic_set(&operation->waiters, 0);
-
-       return operation;
-
-err_request:
-       gb_operation_message_free(operation->request);
-err_cache:
-       kmem_cache_free(gb_operation_cache, operation);
-
-       return NULL;
-}
-
-/*
- * Create a new operation associated with the given connection.  The
- * request and response sizes provided are the number of bytes
- * required to hold the request/response payload only.  Both of
- * these are allowed to be 0.  Note that GB_REQUEST_TYPE_INVALID is
- * reserved as an invalid operation type for all protocols, and this
- * is enforced here.
- */
-struct gb_operation *
-gb_operation_create_flags(struct gb_connection *connection,
-                         u8 type, size_t request_size,
-                         size_t response_size, unsigned long flags,
-                         gfp_t gfp)
-{
-       struct gb_operation *operation;
-
-       if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
-               return NULL;
-       if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
-               type &= ~GB_MESSAGE_TYPE_RESPONSE;
-
-       if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
-               flags &= GB_OPERATION_FLAG_USER_MASK;
-
-       operation = gb_operation_create_common(connection, type,
-                                              request_size, response_size,
-                                              flags, gfp);
-       if (operation)
-               trace_gb_operation_create(operation);
-
-       return operation;
-}
-EXPORT_SYMBOL_GPL(gb_operation_create_flags);
-
-struct gb_operation *
-gb_operation_create_core(struct gb_connection *connection,
-                        u8 type, size_t request_size,
-                        size_t response_size, unsigned long flags,
-                        gfp_t gfp)
-{
-       struct gb_operation *operation;
-
-       flags |= GB_OPERATION_FLAG_CORE;
-
-       operation = gb_operation_create_common(connection, type,
-                                              request_size, response_size,
-                                              flags, gfp);
-       if (operation)
-               trace_gb_operation_create_core(operation);
-
-       return operation;
-}
-
-/* Note: gb_operation_create_core() above is deliberately not exported. */
-
-size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
-{
-       struct gb_host_device *hd = connection->hd;
-
-       return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
-}
-EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
-
-static struct gb_operation *
-gb_operation_create_incoming(struct gb_connection *connection, u16 id,
-                            u8 type, void *data, size_t size)
-{
-       struct gb_operation *operation;
-       size_t request_size;
-       unsigned long flags = GB_OPERATION_FLAG_INCOMING;
-
-       /* Caller has made sure we at least have a message header. */
-       request_size = size - sizeof(struct gb_operation_msg_hdr);
-
-       if (!id)
-               flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
-
-       operation = gb_operation_create_common(connection, type,
-                                              request_size,
-                                              GB_REQUEST_TYPE_INVALID,
-                                              flags, GFP_ATOMIC);
-       if (!operation)
-               return NULL;
-
-       operation->id = id;
-       memcpy(operation->request->header, data, size);
-       trace_gb_operation_create_incoming(operation);
-
-       return operation;
-}
-
-/*
- * Get an additional reference on an operation.
- */
-void gb_operation_get(struct gb_operation *operation)
-{
-       kref_get(&operation->kref);
-}
-EXPORT_SYMBOL_GPL(gb_operation_get);
-
-/*
- * Destroy a previously created operation.
- */
-static void _gb_operation_destroy(struct kref *kref)
-{
-       struct gb_operation *operation;
-
-       operation = container_of(kref, struct gb_operation, kref);
-
-       trace_gb_operation_destroy(operation);
-
-       if (operation->response)
-               gb_operation_message_free(operation->response);
-       gb_operation_message_free(operation->request);
-
-       kmem_cache_free(gb_operation_cache, operation);
-}
-
-/*
- * Drop a reference on an operation, and destroy it when the last
- * one is gone.
- */
-void gb_operation_put(struct gb_operation *operation)
-{
-       if (WARN_ON(!operation))
-               return;
-
-       kref_put(&operation->kref, _gb_operation_destroy);
-}
-EXPORT_SYMBOL_GPL(gb_operation_put);
-
-/* Tell the requester we're done */
-static void gb_operation_sync_callback(struct gb_operation *operation)
-{
-       complete(&operation->completion);
-}
-
-/**
- * gb_operation_request_send() - send an operation request message
- * @operation: the operation to initiate
- * @callback:  the operation completion callback
- * @timeout:   operation timeout in milliseconds, or zero for no timeout
- * @gfp:       the memory flags to use for any allocations
- *
- * The caller has filled in any payload so the request message is ready to go.
- * The callback function supplied will be called when the response message has
- * arrived, a unidirectional request has been sent, or the operation is
- * cancelled, indicating that the operation is complete. The callback function
- * can fetch the result of the operation using gb_operation_result() if
- * desired.
- *
- * Return: 0 if the request was successfully queued in the host-driver queues,
- * or a negative errno.
- */
-int gb_operation_request_send(struct gb_operation *operation,
-                             gb_operation_callback callback,
-                             unsigned int timeout,
-                             gfp_t gfp)
-{
-       struct gb_connection *connection = operation->connection;
-       struct gb_operation_msg_hdr *header;
-       unsigned int cycle;
-       int ret;
-
-       if (gb_connection_is_offloaded(connection))
-               return -EBUSY;
-
-       if (!callback)
-               return -EINVAL;
-
-       /*
-        * Record the callback function, which is executed in
-        * non-atomic (workqueue) context when the final result
-        * of an operation has been set.
-        */
-       operation->callback = callback;
-
-       /*
-        * Assign the operation's id, and store it in the request header.
-        * Zero is a reserved operation id for unidirectional operations.
-        */
-       if (gb_operation_is_unidirectional(operation)) {
-               operation->id = 0;
-       } else {
-               cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
-               operation->id = (u16)(cycle % U16_MAX + 1);
-       }
-
-       header = operation->request->header;
-       header->operation_id = cpu_to_le16(operation->id);
-
-       gb_operation_result_set(operation, -EINPROGRESS);
-
-       /*
-        * Get an extra reference on the operation. It'll be dropped when the
-        * operation completes.
-        */
-       gb_operation_get(operation);
-       ret = gb_operation_get_active(operation);
-       if (ret)
-               goto err_put;
-
-       ret = gb_message_send(operation->request, gfp);
-       if (ret)
-               goto err_put_active;
-
-       if (timeout) {
-               operation->timer.expires = jiffies + msecs_to_jiffies(timeout);
-               add_timer(&operation->timer);
-       }
-
-       return 0;
-
-err_put_active:
-       gb_operation_put_active(operation);
-err_put:
-       gb_operation_put(operation);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_operation_request_send);
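
A hypothetical asynchronous caller sketch, using the gb_operation_create() convenience wrapper declared alongside these helpers: the submitter's reference is dropped in the callback on success, or immediately if the send fails.

static void example_async_callback(struct gb_operation *operation)
{
        int ret = gb_operation_result(operation);

        if (ret)
                dev_err(&operation->connection->hd->dev,
                        "request failed: %d\n", ret);

        gb_operation_put(operation);    /* drop the submitter's reference */
}

static int example_send_async(struct gb_connection *connection, u8 type)
{
        struct gb_operation *operation;
        int ret;

        operation = gb_operation_create(connection, type, 0, 0, GFP_KERNEL);
        if (!operation)
                return -ENOMEM;

        ret = gb_operation_request_send(operation, example_async_callback,
                                        0 /* no timeout */, GFP_KERNEL);
        if (ret)
                gb_operation_put(operation);

        return ret;
}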
-
-/*
- * Send a synchronous operation.  This function is expected to
- * block, returning only when the response has arrived (or when an
- * error is detected).  The return value is the result of the
- * operation.
- */
-int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
-                                          unsigned int timeout)
-{
-       int ret;
-
-       ret = gb_operation_request_send(operation, gb_operation_sync_callback,
-                                       timeout, GFP_KERNEL);
-       if (ret)
-               return ret;
-
-       ret = wait_for_completion_interruptible(&operation->completion);
-       if (ret < 0) {
-               /* Cancel the operation if interrupted */
-               gb_operation_cancel(operation, -ECANCELED);
-       }
-
-       return gb_operation_result(operation);
-}
-EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
-
-/*
- * Send a response for an incoming operation request.  A non-zero
- * errno indicates a failed operation.
- *
- * If there is any response payload, the incoming request handler is
- * responsible for allocating the response message.  Otherwise it
- * can simply supply the result errno; this function will allocate
- * the response message if necessary.
- */
-static int gb_operation_response_send(struct gb_operation *operation,
-                                     int errno)
-{
-       struct gb_connection *connection = operation->connection;
-       int ret;
-
-       if (!operation->response &&
-           !gb_operation_is_unidirectional(operation)) {
-               if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
-                       return -ENOMEM;
-       }
-
-       /* Record the result */
-       if (!gb_operation_result_set(operation, errno)) {
-               dev_err(&connection->hd->dev, "request result already set\n");
-               return -EIO;    /* Shouldn't happen */
-       }
-
-       /* Sender of request does not care about response. */
-       if (gb_operation_is_unidirectional(operation))
-               return 0;
-
-       /* Reference will be dropped when message has been sent. */
-       gb_operation_get(operation);
-       ret = gb_operation_get_active(operation);
-       if (ret)
-               goto err_put;
-
-       /* Fill in the response header and send it */
-       operation->response->header->result = gb_operation_errno_map(errno);
-
-       ret = gb_message_send(operation->response, GFP_KERNEL);
-       if (ret)
-               goto err_put_active;
-
-       return 0;
-
-err_put_active:
-       gb_operation_put_active(operation);
-err_put:
-       gb_operation_put(operation);
-
-       return ret;
-}
-
-/*
- * This function is called when a message send request has completed.
- */
-void greybus_message_sent(struct gb_host_device *hd,
-                         struct gb_message *message, int status)
-{
-       struct gb_operation *operation = message->operation;
-       struct gb_connection *connection = operation->connection;
-
-       /*
-        * If the message was a response, we just need to drop our
-        * reference to the operation.  If an error occurred, report
-        * it.
-        *
- * For requests, if there's no error and the operation is not
- * unidirectional, there's nothing more to do until the response
- * arrives. If an error occurred attempting to send it, or if the
- * operation is unidirectional, record the result of the operation and
-        * schedule its completion.
-        */
-       if (message == operation->response) {
-               if (status) {
-                       dev_err(&connection->hd->dev,
-                               "%s: error sending response 0x%02x: %d\n",
-                               connection->name, operation->type, status);
-               }
-
-               gb_operation_put_active(operation);
-               gb_operation_put(operation);
-       } else if (status || gb_operation_is_unidirectional(operation)) {
-               if (gb_operation_result_set(operation, status)) {
-                       queue_work(gb_operation_completion_wq,
-                                  &operation->work);
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(greybus_message_sent);
-
-/*
- * We've received data on a connection, and it doesn't look like a
- * response, so we assume it's a request.
- *
- * This is called in interrupt context, so just copy the incoming
- * data into the request buffer and handle the rest via workqueue.
- */
-static void gb_connection_recv_request(struct gb_connection *connection,
-                               const struct gb_operation_msg_hdr *header,
-                               void *data, size_t size)
-{
-       struct gb_operation *operation;
-       u16 operation_id;
-       u8 type;
-       int ret;
-
-       operation_id = le16_to_cpu(header->operation_id);
-       type = header->type;
-
-       operation = gb_operation_create_incoming(connection, operation_id,
-                                                type, data, size);
-       if (!operation) {
-               dev_err(&connection->hd->dev,
-                       "%s: can't create incoming operation\n",
-                       connection->name);
-               return;
-       }
-
-       ret = gb_operation_get_active(operation);
-       if (ret) {
-               gb_operation_put(operation);
-               return;
-       }
-       trace_gb_message_recv_request(operation->request);
-
-       /*
-        * The initial reference to the operation will be dropped when the
-        * request handler returns.
-        */
-       if (gb_operation_result_set(operation, -EINPROGRESS))
-               queue_work(connection->wq, &operation->work);
-}
-
-/*
- * We've received data that appears to be an operation response
- * message.  Look up the operation, and record that we've received
- * its response.
- *
- * This is called in interrupt context, so just copy the incoming
- * data into the response buffer and handle the rest via workqueue.
- */
-static void gb_connection_recv_response(struct gb_connection *connection,
-                               const struct gb_operation_msg_hdr *header,
-                               void *data, size_t size)
-{
-       struct gb_operation *operation;
-       struct gb_message *message;
-       size_t message_size;
-       u16 operation_id;
-       int errno;
-
-       operation_id = le16_to_cpu(header->operation_id);
-
-       if (!operation_id) {
-               dev_err_ratelimited(&connection->hd->dev,
-                                   "%s: invalid response id 0 received\n",
-                                   connection->name);
-               return;
-       }
-
-       operation = gb_operation_find_outgoing(connection, operation_id);
-       if (!operation) {
-               dev_err_ratelimited(&connection->hd->dev,
-                                   "%s: unexpected response id 0x%04x received\n",
-                                   connection->name, operation_id);
-               return;
-       }
-
-       errno = gb_operation_status_map(header->result);
-       message = operation->response;
-       message_size = sizeof(*header) + message->payload_size;
-       if (!errno && size > message_size) {
-               dev_err_ratelimited(&connection->hd->dev,
-                                   "%s: malformed response 0x%02x received (%zu > %zu)\n",
-                                   connection->name, header->type,
-                                   size, message_size);
-               errno = -EMSGSIZE;
-       } else if (!errno && size < message_size) {
-               if (gb_operation_short_response_allowed(operation)) {
-                       message->payload_size = size - sizeof(*header);
-               } else {
-                       dev_err_ratelimited(&connection->hd->dev,
-                                           "%s: short response 0x%02x received (%zu < %zu)\n",
-                                           connection->name, header->type,
-                                           size, message_size);
-                       errno = -EMSGSIZE;
-               }
-       }
-
-       /* We must ignore the payload if a bad status is returned */
-       if (errno)
-               size = sizeof(*header);
-
-       /* The rest will be handled in work queue context */
-       if (gb_operation_result_set(operation, errno)) {
-               memcpy(message->buffer, data, size);
-
-               trace_gb_message_recv_response(message);
-
-               queue_work(gb_operation_completion_wq, &operation->work);
-       }
-
-       gb_operation_put(operation);
-}
-
-/*
- * Handle data arriving on a connection.  As soon as we return, the
- * supplied data buffer will be reused (so unless we do something
- * with it, it's effectively dropped).
- */
-void gb_connection_recv(struct gb_connection *connection,
-                       void *data, size_t size)
-{
-       struct gb_operation_msg_hdr header;
-       struct device *dev = &connection->hd->dev;
-       size_t msg_size;
-
-       if (connection->state == GB_CONNECTION_STATE_DISABLED ||
-           gb_connection_is_offloaded(connection)) {
-               dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
-                                    connection->name, size);
-               return;
-       }
-
-       if (size < sizeof(header)) {
-               dev_err_ratelimited(dev, "%s: short message received\n",
-                                   connection->name);
-               return;
-       }
-
-       /* Use memcpy as data may be unaligned */
-       memcpy(&header, data, sizeof(header));
-       msg_size = le16_to_cpu(header.size);
-       if (size < msg_size) {
-               dev_err_ratelimited(dev,
-                                   "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
-                                   connection->name,
-                                   le16_to_cpu(header.operation_id),
-                                   header.type, size, msg_size);
-               return;         /* XXX Should still complete operation */
-       }
-
-       if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
-               gb_connection_recv_response(connection, &header, data,
-                                           msg_size);
-       } else {
-               gb_connection_recv_request(connection, &header, data,
-                                          msg_size);
-       }
-}
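
For reference, the on-wire header this dispatch is keyed on; the layout mirrors gb_operation_msg_hdr from the greybus protocol definitions, with the high bit of the type (GB_MESSAGE_TYPE_RESPONSE) distinguishing responses from requests:

struct example_msg_hdr {
        __le16  size;           /* header plus payload, little endian */
        __le16  operation_id;   /* 0 means unidirectional */
        __u8    type;           /* request type; high bit set in responses */
        __u8    result;         /* meaningful in responses only */
        __u8    pad[2];         /* must be zero */
} __packed;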
-
-/*
- * Cancel an outgoing operation synchronously, and record the given error to
- * indicate why.
- */
-void gb_operation_cancel(struct gb_operation *operation, int errno)
-{
-       if (WARN_ON(gb_operation_is_incoming(operation)))
-               return;
-
-       if (gb_operation_result_set(operation, errno)) {
-               gb_message_cancel(operation->request);
-               queue_work(gb_operation_completion_wq, &operation->work);
-       }
-       trace_gb_message_cancel_outgoing(operation->request);
-
-       atomic_inc(&operation->waiters);
-       wait_event(gb_operation_cancellation_queue,
-                  !gb_operation_is_active(operation));
-       atomic_dec(&operation->waiters);
-}
-EXPORT_SYMBOL_GPL(gb_operation_cancel);
-
-/*
- * Cancel an incoming operation synchronously. Called during connection tear
- * down.
- */
-void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
-{
-       if (WARN_ON(!gb_operation_is_incoming(operation)))
-               return;
-
-       if (!gb_operation_is_unidirectional(operation)) {
-               /*
-                * Make sure the request handler has submitted the response
-                * before cancelling it.
-                */
-               flush_work(&operation->work);
-               if (!gb_operation_result_set(operation, errno))
-                       gb_message_cancel(operation->response);
-       }
-       trace_gb_message_cancel_incoming(operation->response);
-
-       atomic_inc(&operation->waiters);
-       wait_event(gb_operation_cancellation_queue,
-                  !gb_operation_is_active(operation));
-       atomic_dec(&operation->waiters);
-}
-
-/**
- * gb_operation_sync_timeout() - implement a "simple" synchronous operation
- * @connection: the Greybus connection to send this to
- * @type: the type of operation to send
- * @request: pointer to a memory buffer to copy the request from
- * @request_size: size of @request
- * @response: pointer to a memory buffer to copy the response to
- * @response_size: the size of @response.
- * @timeout: operation timeout in milliseconds
- *
- * This function implements a simple synchronous Greybus operation.  It sends
- * the provided operation request and waits (sleeps) until the corresponding
- * operation response message has been successfully received, or an error
- * occurs.  @request and @response are buffers to hold the request and response
- * data respectively, and if they are not NULL, their size must be specified in
- * @request_size and @response_size.
- *
- * If a response payload is to come back, and @response is not NULL,
- * @response_size number of bytes will be copied into @response if the operation
- * is successful.
- *
- * If there is an error, the response buffer is left alone.
- */
-int gb_operation_sync_timeout(struct gb_connection *connection, int type,
-                             void *request, int request_size,
-                             void *response, int response_size,
-                             unsigned int timeout)
-{
-       struct gb_operation *operation;
-       int ret;
-
-       if ((response_size && !response) ||
-           (request_size && !request))
-               return -EINVAL;
-
-       operation = gb_operation_create(connection, type,
-                                       request_size, response_size,
-                                       GFP_KERNEL);
-       if (!operation)
-               return -ENOMEM;
-
-       if (request_size)
-               memcpy(operation->request->payload, request, request_size);
-
-       ret = gb_operation_request_send_sync_timeout(operation, timeout);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
-                       connection->name, operation->id, type, ret);
-       } else {
-               if (response_size) {
-                       memcpy(response, operation->response->payload,
-                              response_size);
-               }
-       }
-
-       gb_operation_put(operation);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
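-
-/*
- * Illustrative usage sketch (not part of this file): a protocol driver
- * issuing a simple request/response exchange over its connection.  The
- * operation type and payload structs below are hypothetical.
- *
- *	struct example_req { __le32 value; } req;
- *	struct example_resp { __le32 status; } resp;
- *	int ret;
- *
- *	req.value = cpu_to_le32(42);
- *	ret = gb_operation_sync_timeout(connection, EXAMPLE_TYPE_SET,
- *					&req, sizeof(req),
- *					&resp, sizeof(resp),
- *					1000);
- *
- * On success the response payload has been copied into resp; the final
- * argument is the timeout in milliseconds.
- */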
-
-/**
- * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
- * @connection:                connection to use
- * @type:              type of operation to send
- * @request:           memory buffer to copy the request from
- * @request_size:      size of @request
- * @timeout:           send timeout in milliseconds
- *
- * Initiate a unidirectional operation by sending a request message and
- * waiting for it to be acknowledged as sent by the host device.
- *
- * Note that successful send of a unidirectional operation does not imply that
- * the request has actually reached the remote end of the connection.
- */
-int gb_operation_unidirectional_timeout(struct gb_connection *connection,
-                                       int type, void *request,
-                                       int request_size,
-                                       unsigned int timeout)
-{
-       struct gb_operation *operation;
-       int ret;
-
-       if (request_size && !request)
-               return -EINVAL;
-
-       operation = gb_operation_create_flags(connection, type,
-                                             request_size, 0,
-                                             GB_OPERATION_FLAG_UNIDIRECTIONAL,
-                                             GFP_KERNEL);
-       if (!operation)
-               return -ENOMEM;
-
-       if (request_size)
-               memcpy(operation->request->payload, request, request_size);
-
-       ret = gb_operation_request_send_sync_timeout(operation, timeout);
-       if (ret) {
-               dev_err(&connection->hd->dev,
-                       "%s: unidirectional operation of type 0x%02x failed: %d\n",
-                       connection->name, type, ret);
-       }
-
-       gb_operation_put(operation);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
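-
-/*
- * Illustrative usage sketch (hypothetical operation type and payload):
- * fire a one-way notification, waiting only until the host device has
- * accepted the request for sending.
- *
- *	struct example_event { __le16 id; } ev = { .id = cpu_to_le16(7) };
- *
- *	ret = gb_operation_unidirectional_timeout(connection,
- *						   EXAMPLE_TYPE_NOTIFY,
- *						   &ev, sizeof(ev), 500);
- */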
-
-int __init gb_operation_init(void)
-{
-       gb_message_cache = kmem_cache_create("gb_message_cache",
-                                            sizeof(struct gb_message), 0, 0,
-                                            NULL);
-       if (!gb_message_cache)
-               return -ENOMEM;
-
-       gb_operation_cache = kmem_cache_create("gb_operation_cache",
-                                              sizeof(struct gb_operation), 0,
-                                              0, NULL);
-       if (!gb_operation_cache)
-               goto err_destroy_message_cache;
-
-       gb_operation_completion_wq = alloc_workqueue("greybus_completion",
-                                                    0, 0);
-       if (!gb_operation_completion_wq)
-               goto err_destroy_operation_cache;
-
-       return 0;
-
-err_destroy_operation_cache:
-       kmem_cache_destroy(gb_operation_cache);
-       gb_operation_cache = NULL;
-err_destroy_message_cache:
-       kmem_cache_destroy(gb_message_cache);
-       gb_message_cache = NULL;
-
-       return -ENOMEM;
-}
-
-void gb_operation_exit(void)
-{
-       destroy_workqueue(gb_operation_completion_wq);
-       gb_operation_completion_wq = NULL;
-       kmem_cache_destroy(gb_operation_cache);
-       gb_operation_cache = NULL;
-       kmem_cache_destroy(gb_message_cache);
-       gb_message_cache = NULL;
-}
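-
-/*
- * Note: gb_operation_init() and gb_operation_exit() are paired from the
- * greybus core's module init and exit paths, roughly:
- *
- *	retval = gb_operation_init();
- *	if (retval)
- *		return retval;
- *	...
- *	gb_operation_exit();
- */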
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
deleted file mode 100644 (file)
index ce7740e..0000000
+++ /dev/null
@@ -1,1397 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SVC Greybus driver.
- *
- * Copyright 2015 Google Inc.
- * Copyright 2015 Linaro Ltd.
- */
-
-#include <linux/debugfs.h>
-#include <linux/workqueue.h>
-#include <linux/greybus.h>
-
-#define SVC_INTF_EJECT_TIMEOUT         9000
-#define SVC_INTF_ACTIVATE_TIMEOUT      6000
-#define SVC_INTF_RESUME_TIMEOUT                3000
-
-struct gb_svc_deferred_request {
-       struct work_struct work;
-       struct gb_operation *operation;
-};
-
-static int gb_svc_queue_deferred_request(struct gb_operation *operation);
-
-static ssize_t endo_id_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       return sprintf(buf, "0x%04x\n", svc->endo_id);
-}
-static DEVICE_ATTR_RO(endo_id);
-
-static ssize_t ap_intf_id_show(struct device *dev,
-                              struct device_attribute *attr, char *buf)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       return sprintf(buf, "%u\n", svc->ap_intf_id);
-}
-static DEVICE_ATTR_RO(ap_intf_id);
-
-// FIXME
-// This is a hack, we need to do this "right" and clean the interface up
-// properly, not just forcibly yank the thing out of the system and hope for the
-// best.  But for now, people want their modules to come out without having to
-// throw the thing to the ground or get out a screwdriver.
-static ssize_t intf_eject_store(struct device *dev,
-                               struct device_attribute *attr, const char *buf,
-                               size_t len)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-       unsigned short intf_id;
-       int ret;
-
-       ret = kstrtou16(buf, 10, &intf_id);
-       if (ret < 0)
-               return ret;
-
-       dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
-
-       ret = gb_svc_intf_eject(svc, intf_id);
-       if (ret < 0)
-               return ret;
-
-       return len;
-}
-static DEVICE_ATTR_WO(intf_eject);
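-
-/*
- * Userspace sketch (the path assumes host-device bus id 1, giving the
- * "1-svc" device name set in gb_svc_create()): interface ids are written
- * in decimal, matching the kstrtou16(buf, 10, ...) parse above.
- *
- *	echo 2 > /sys/bus/greybus/devices/1-svc/intf_eject
- */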
-
-static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
-                            char *buf)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       return sprintf(buf, "%s\n",
-                      gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
-}
-
-static ssize_t watchdog_store(struct device *dev,
-                             struct device_attribute *attr, const char *buf,
-                             size_t len)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-       int retval;
-       bool user_request;
-
-       retval = strtobool(buf, &user_request);
-       if (retval)
-               return retval;
-
-       if (user_request)
-               retval = gb_svc_watchdog_enable(svc);
-       else
-               retval = gb_svc_watchdog_disable(svc);
-       if (retval)
-               return retval;
-       return len;
-}
-static DEVICE_ATTR_RW(watchdog);
-
-static ssize_t watchdog_action_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
-               return sprintf(buf, "panic\n");
-       else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
-               return sprintf(buf, "reset\n");
-
-       return -EINVAL;
-}
-
-static ssize_t watchdog_action_store(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t len)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       if (sysfs_streq(buf, "panic"))
-               svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
-       else if (sysfs_streq(buf, "reset"))
-               svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
-       else
-               return -EINVAL;
-
-       return len;
-}
-static DEVICE_ATTR_RW(watchdog_action);
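-
-/*
- * Userspace sketch for the watchdog attributes (same device path as for
- * intf_eject above; strtobool() also accepts y/Y/n/N):
- *
- *	echo panic > /sys/bus/greybus/devices/1-svc/watchdog_action
- *	echo 1 > /sys/bus/greybus/devices/1-svc/watchdog
- *	cat /sys/bus/greybus/devices/1-svc/watchdog
- */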
-
-static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
-{
-       struct gb_svc_pwrmon_rail_count_get_response response;
-       int ret;
-
-       ret = gb_operation_sync(svc->connection,
-                               GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
-               return ret;
-       }
-
-       *value = response.rail_count;
-
-       return 0;
-}
-
-static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
-               struct gb_svc_pwrmon_rail_names_get_response *response,
-               size_t bufsize)
-{
-       int ret;
-
-       ret = gb_operation_sync(svc->connection,
-                               GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
-                               response, bufsize);
-       if (ret) {
-               dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
-               return ret;
-       }
-
-       if (response->status != GB_SVC_OP_SUCCESS) {
-               dev_err(&svc->dev,
-                       "SVC error while getting rail names: %u\n",
-                       response->status);
-               return -EREMOTEIO;
-       }
-
-       return 0;
-}
-
-static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
-                                   u8 measurement_type, u32 *value)
-{
-       struct gb_svc_pwrmon_sample_get_request request;
-       struct gb_svc_pwrmon_sample_get_response response;
-       int ret;
-
-       request.rail_id = rail_id;
-       request.measurement_type = measurement_type;
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
-               return ret;
-       }
-
-       if (response.result) {
-               dev_err(&svc->dev,
-                       "UniPro error while getting rail power sample (%d %d): %d\n",
-                       rail_id, measurement_type, response.result);
-               switch (response.result) {
-               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
-                       return -EINVAL;
-               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
-                       return -ENOMSG;
-               default:
-                       return -EREMOTEIO;
-               }
-       }
-
-       *value = le32_to_cpu(response.measurement);
-
-       return 0;
-}
-
-int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
-                                 u8 measurement_type, u32 *value)
-{
-       struct gb_svc_pwrmon_intf_sample_get_request request;
-       struct gb_svc_pwrmon_intf_sample_get_response response;
-       int ret;
-
-       request.intf_id = intf_id;
-       request.measurement_type = measurement_type;
-
-       ret = gb_operation_sync(svc->connection,
-                               GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
-               return ret;
-       }
-
-       if (response.result) {
-               dev_err(&svc->dev,
-                       "UniPro error while getting intf power sample (%d %d): %d\n",
-                       intf_id, measurement_type, response.result);
-               switch (response.result) {
-               case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
-                       return -EINVAL;
-               case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
-                       return -ENOMSG;
-               default:
-                       return -EREMOTEIO;
-               }
-       }
-
-       *value = le32_to_cpu(response.measurement);
-
-       return 0;
-}
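-
-/*
- * Illustrative call (the interface id is an example): read one voltage
- * sample for interface 3, in whatever units the SVC firmware reports.
- *
- *	u32 sample;
- *
- *	ret = gb_svc_pwrmon_intf_sample_get(svc, 3,
- *					    GB_SVC_PWRMON_TYPE_VOL,
- *					    &sample);
- */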
-
-static struct attribute *svc_attrs[] = {
-       &dev_attr_endo_id.attr,
-       &dev_attr_ap_intf_id.attr,
-       &dev_attr_intf_eject.attr,
-       &dev_attr_watchdog.attr,
-       &dev_attr_watchdog_action.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(svc);
-
-int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
-{
-       struct gb_svc_intf_device_id_request request;
-
-       request.intf_id = intf_id;
-       request.device_id = device_id;
-
-       return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
-                                &request, sizeof(request), NULL, 0);
-}
-
-int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
-{
-       struct gb_svc_intf_eject_request request;
-       int ret;
-
-       request.intf_id = intf_id;
-
-       /*
-        * The pulse width for module release in the SVC is long, so we need
-        * to increase the timeout so that the operation will not return too
-        * soon.
-        */
-       ret = gb_operation_sync_timeout(svc->connection,
-                                       GB_SVC_TYPE_INTF_EJECT, &request,
-                                       sizeof(request), NULL, 0,
-                                       SVC_INTF_EJECT_TIMEOUT);
-       if (ret) {
-               dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
-               return ret;
-       }
-
-       return 0;
-}
-
-int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
-{
-       struct gb_svc_intf_vsys_request request;
-       struct gb_svc_intf_vsys_response response;
-       int type, ret;
-
-       request.intf_id = intf_id;
-
-       if (enable)
-               type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
-       else
-               type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
-
-       ret = gb_operation_sync(svc->connection, type,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret < 0)
-               return ret;
-       if (response.result_code != GB_SVC_INTF_VSYS_OK)
-               return -EREMOTEIO;
-       return 0;
-}
-
-int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
-{
-       struct gb_svc_intf_refclk_request request;
-       struct gb_svc_intf_refclk_response response;
-       int type, ret;
-
-       request.intf_id = intf_id;
-
-       if (enable)
-               type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
-       else
-               type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
-
-       ret = gb_operation_sync(svc->connection, type,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret < 0)
-               return ret;
-       if (response.result_code != GB_SVC_INTF_REFCLK_OK)
-               return -EREMOTEIO;
-       return 0;
-}
-
-int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
-{
-       struct gb_svc_intf_unipro_request request;
-       struct gb_svc_intf_unipro_response response;
-       int type, ret;
-
-       request.intf_id = intf_id;
-
-       if (enable)
-               type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
-       else
-               type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
-
-       ret = gb_operation_sync(svc->connection, type,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret < 0)
-               return ret;
-       if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
-               return -EREMOTEIO;
-       return 0;
-}
-
-int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
-{
-       struct gb_svc_intf_activate_request request;
-       struct gb_svc_intf_activate_response response;
-       int ret;
-
-       request.intf_id = intf_id;
-
-       ret = gb_operation_sync_timeout(svc->connection,
-                                       GB_SVC_TYPE_INTF_ACTIVATE,
-                                       &request, sizeof(request),
-                                       &response, sizeof(response),
-                                       SVC_INTF_ACTIVATE_TIMEOUT);
-       if (ret < 0)
-               return ret;
-       if (response.status != GB_SVC_OP_SUCCESS) {
-               dev_err(&svc->dev, "failed to activate interface %u: %u\n",
-                       intf_id, response.status);
-               return -EREMOTEIO;
-       }
-
-       *intf_type = response.intf_type;
-
-       return 0;
-}
-
-int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
-{
-       struct gb_svc_intf_resume_request request;
-       struct gb_svc_intf_resume_response response;
-       int ret;
-
-       request.intf_id = intf_id;
-
-       ret = gb_operation_sync_timeout(svc->connection,
-                                       GB_SVC_TYPE_INTF_RESUME,
-                                       &request, sizeof(request),
-                                       &response, sizeof(response),
-                                       SVC_INTF_RESUME_TIMEOUT);
-       if (ret < 0) {
-               dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
-                       intf_id, ret);
-               return ret;
-       }
-
-       if (response.status != GB_SVC_OP_SUCCESS) {
-               dev_err(&svc->dev, "failed to resume interface %u: %u\n",
-                       intf_id, response.status);
-               return -EREMOTEIO;
-       }
-
-       return 0;
-}
-
-int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
-                       u32 *value)
-{
-       struct gb_svc_dme_peer_get_request request;
-       struct gb_svc_dme_peer_get_response response;
-       u16 result;
-       int ret;
-
-       request.intf_id = intf_id;
-       request.attr = cpu_to_le16(attr);
-       request.selector = cpu_to_le16(selector);
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
-                       intf_id, attr, selector, ret);
-               return ret;
-       }
-
-       result = le16_to_cpu(response.result_code);
-       if (result) {
-               dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
-                       intf_id, attr, selector, result);
-               return -EREMOTEIO;
-       }
-
-       if (value)
-               *value = le32_to_cpu(response.attr_value);
-
-       return 0;
-}
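-
-/*
- * Illustrative call (the attribute, selector and interface id are example
- * values only): read a DME attribute of the peer at interface 2.
- *
- *	u32 val;
- *
- *	ret = gb_svc_dme_peer_get(svc, 2, 0x5003, 0, &val);
- */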
-
-int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
-                       u32 value)
-{
-       struct gb_svc_dme_peer_set_request request;
-       struct gb_svc_dme_peer_set_response response;
-       u16 result;
-       int ret;
-
-       request.intf_id = intf_id;
-       request.attr = cpu_to_le16(attr);
-       request.selector = cpu_to_le16(selector);
-       request.value = cpu_to_le32(value);
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret) {
-               dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
-                       intf_id, attr, selector, value, ret);
-               return ret;
-       }
-
-       result = le16_to_cpu(response.result_code);
-       if (result) {
-               dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
-                       intf_id, attr, selector, value, result);
-               return -EREMOTEIO;
-       }
-
-       return 0;
-}
-
-int gb_svc_connection_create(struct gb_svc *svc,
-                            u8 intf1_id, u16 cport1_id,
-                            u8 intf2_id, u16 cport2_id,
-                            u8 cport_flags)
-{
-       struct gb_svc_conn_create_request request;
-
-       request.intf1_id = intf1_id;
-       request.cport1_id = cpu_to_le16(cport1_id);
-       request.intf2_id = intf2_id;
-       request.cport2_id = cpu_to_le16(cport2_id);
-       request.tc = 0;         /* TC0 */
-       request.flags = cport_flags;
-
-       return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
-                                &request, sizeof(request), NULL, 0);
-}
-
-void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
-                              u8 intf2_id, u16 cport2_id)
-{
-       struct gb_svc_conn_destroy_request request;
-       struct gb_connection *connection = svc->connection;
-       int ret;
-
-       request.intf1_id = intf1_id;
-       request.cport1_id = cpu_to_le16(cport1_id);
-       request.intf2_id = intf2_id;
-       request.cport2_id = cpu_to_le16(cport2_id);
-
-       ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
-                               &request, sizeof(request), NULL, 0);
-       if (ret) {
-               dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
-                       intf1_id, cport1_id, intf2_id, cport2_id, ret);
-       }
-}
-
-/* Creates bi-directional routes between the devices */
-int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
-                       u8 intf2_id, u8 dev2_id)
-{
-       struct gb_svc_route_create_request request;
-
-       request.intf1_id = intf1_id;
-       request.dev1_id = dev1_id;
-       request.intf2_id = intf2_id;
-       request.dev2_id = dev2_id;
-
-       return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
-                                &request, sizeof(request), NULL, 0);
-}
-
-/* Destroys bi-directional routes between the devices */
-void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
-{
-       struct gb_svc_route_destroy_request request;
-       int ret;
-
-       request.intf1_id = intf1_id;
-       request.intf2_id = intf2_id;
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
-                               &request, sizeof(request), NULL, 0);
-       if (ret) {
-               dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
-                       intf1_id, intf2_id, ret);
-       }
-}
-
-int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
-                              u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
-                              u8 tx_amplitude, u8 tx_hs_equalizer,
-                              u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
-                              u8 flags, u32 quirks,
-                              struct gb_svc_l2_timer_cfg *local,
-                              struct gb_svc_l2_timer_cfg *remote)
-{
-       struct gb_svc_intf_set_pwrm_request request;
-       struct gb_svc_intf_set_pwrm_response response;
-       int ret;
-       u16 result_code;
-
-       memset(&request, 0, sizeof(request));
-
-       request.intf_id = intf_id;
-       request.hs_series = hs_series;
-       request.tx_mode = tx_mode;
-       request.tx_gear = tx_gear;
-       request.tx_nlanes = tx_nlanes;
-       request.tx_amplitude = tx_amplitude;
-       request.tx_hs_equalizer = tx_hs_equalizer;
-       request.rx_mode = rx_mode;
-       request.rx_gear = rx_gear;
-       request.rx_nlanes = rx_nlanes;
-       request.flags = flags;
-       request.quirks = cpu_to_le32(quirks);
-       if (local)
-               request.local_l2timerdata = *local;
-       if (remote)
-               request.remote_l2timerdata = *remote;
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret < 0)
-               return ret;
-
-       result_code = response.result_code;
-       if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
-               dev_err(&svc->dev, "set power mode = %d\n", result_code);
-               return -EIO;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
-
-int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
-{
-       struct gb_svc_intf_set_pwrm_request request;
-       struct gb_svc_intf_set_pwrm_response response;
-       int ret;
-       u16 result_code;
-
-       memset(&request, 0, sizeof(request));
-
-       request.intf_id = intf_id;
-       request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
-       request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
-       request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
-
-       ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
-                               &request, sizeof(request),
-                               &response, sizeof(response));
-       if (ret < 0) {
-               dev_err(&svc->dev,
-                       "failed to send set power mode operation to interface %u: %d\n",
-                       intf_id, ret);
-               return ret;
-       }
-
-       result_code = response.result_code;
-       if (result_code != GB_SVC_SETPWRM_PWR_OK) {
-               dev_err(&svc->dev,
-                       "failed to hibernate the link for interface %u: %u\n",
-                       intf_id, result_code);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-int gb_svc_ping(struct gb_svc *svc)
-{
-       return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
-                                        NULL, 0, NULL, 0,
-                                        GB_OPERATION_TIMEOUT_DEFAULT * 2);
-}
-
-static int gb_svc_version_request(struct gb_operation *op)
-{
-       struct gb_connection *connection = op->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_svc_version_request *request;
-       struct gb_svc_version_response *response;
-
-       if (op->request->payload_size < sizeof(*request)) {
-               dev_err(&svc->dev, "short version request (%zu < %zu)\n",
-                       op->request->payload_size,
-                       sizeof(*request));
-               return -EINVAL;
-       }
-
-       request = op->request->payload;
-
-       if (request->major > GB_SVC_VERSION_MAJOR) {
-               dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
-                        request->major, GB_SVC_VERSION_MAJOR);
-               return -ENOTSUPP;
-       }
-
-       svc->protocol_major = request->major;
-       svc->protocol_minor = request->minor;
-
-       if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
-               return -ENOMEM;
-
-       response = op->response->payload;
-       response->major = svc->protocol_major;
-       response->minor = svc->protocol_minor;
-
-       return 0;
-}
-
-static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
-                                       size_t len, loff_t *offset)
-{
-       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
-               file_inode(file)->i_private;
-       struct gb_svc *svc = pwrmon_rails->svc;
-       int ret, desc;
-       u32 value;
-       char buff[16];
-
-       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
-                                      GB_SVC_PWRMON_TYPE_VOL, &value);
-       if (ret) {
-               dev_err(&svc->dev,
-                       "failed to get voltage sample %u: %d\n",
-                       pwrmon_rails->id, ret);
-               return ret;
-       }
-
-       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
-
-       return simple_read_from_buffer(buf, len, offset, buff, desc);
-}
-
-static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
-                                       size_t len, loff_t *offset)
-{
-       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
-               file_inode(file)->i_private;
-       struct gb_svc *svc = pwrmon_rails->svc;
-       int ret, desc;
-       u32 value;
-       char buff[16];
-
-       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
-                                      GB_SVC_PWRMON_TYPE_CURR, &value);
-       if (ret) {
-               dev_err(&svc->dev,
-                       "failed to get current sample %u: %d\n",
-                       pwrmon_rails->id, ret);
-               return ret;
-       }
-
-       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
-
-       return simple_read_from_buffer(buf, len, offset, buff, desc);
-}
-
-static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
-                                     size_t len, loff_t *offset)
-{
-       struct svc_debugfs_pwrmon_rail *pwrmon_rails =
-               file_inode(file)->i_private;
-       struct gb_svc *svc = pwrmon_rails->svc;
-       int ret, desc;
-       u32 value;
-       char buff[16];
-
-       ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
-                                      GB_SVC_PWRMON_TYPE_PWR, &value);
-       if (ret) {
-               dev_err(&svc->dev, "failed to get power sample %u: %d\n",
-                       pwrmon_rails->id, ret);
-               return ret;
-       }
-
-       desc = scnprintf(buff, sizeof(buff), "%u\n", value);
-
-       return simple_read_from_buffer(buf, len, offset, buff, desc);
-}
-
-static const struct file_operations pwrmon_debugfs_voltage_fops = {
-       .read           = pwr_debugfs_voltage_read,
-};
-
-static const struct file_operations pwrmon_debugfs_current_fops = {
-       .read           = pwr_debugfs_current_read,
-};
-
-static const struct file_operations pwrmon_debugfs_power_fops = {
-       .read           = pwr_debugfs_power_read,
-};
-
-static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
-{
-       int i;
-       size_t bufsize;
-       struct dentry *dent;
-       struct gb_svc_pwrmon_rail_names_get_response *rail_names;
-       u8 rail_count;
-
-       dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
-       if (IS_ERR_OR_NULL(dent))
-               return;
-
-       if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
-               goto err_pwrmon_debugfs;
-
-       if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
-               goto err_pwrmon_debugfs;
-
-       bufsize = sizeof(*rail_names) +
-               GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
-
-       rail_names = kzalloc(bufsize, GFP_KERNEL);
-       if (!rail_names)
-               goto err_pwrmon_debugfs;
-
-       svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
-                                   GFP_KERNEL);
-       if (!svc->pwrmon_rails)
-               goto err_pwrmon_debugfs_free;
-
-       if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
-               goto err_pwrmon_debugfs_free;
-
-       for (i = 0; i < rail_count; i++) {
-               struct dentry *dir;
-               struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
-               char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
-
-               snprintf(fname, sizeof(fname), "%s",
-                        (char *)&rail_names->name[i]);
-
-               rail->id = i;
-               rail->svc = svc;
-
-               dir = debugfs_create_dir(fname, dent);
-               debugfs_create_file("voltage_now", 0444, dir, rail,
-                                   &pwrmon_debugfs_voltage_fops);
-               debugfs_create_file("current_now", 0444, dir, rail,
-                                   &pwrmon_debugfs_current_fops);
-               debugfs_create_file("power_now", 0444, dir, rail,
-                                   &pwrmon_debugfs_power_fops);
-       }
-
-       kfree(rail_names);
-       return;
-
-err_pwrmon_debugfs_free:
-       kfree(rail_names);
-       kfree(svc->pwrmon_rails);
-       svc->pwrmon_rails = NULL;
-
-err_pwrmon_debugfs:
-       debugfs_remove(dent);
-}
-
-static void gb_svc_debugfs_init(struct gb_svc *svc)
-{
-       svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
-                                                gb_debugfs_get());
-       gb_svc_pwrmon_debugfs_init(svc);
-}
-
-static void gb_svc_debugfs_exit(struct gb_svc *svc)
-{
-       debugfs_remove_recursive(svc->debugfs_dentry);
-       kfree(svc->pwrmon_rails);
-       svc->pwrmon_rails = NULL;
-}
-
-static int gb_svc_hello(struct gb_operation *op)
-{
-       struct gb_connection *connection = op->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_svc_hello_request *hello_request;
-       int ret;
-
-       if (op->request->payload_size < sizeof(*hello_request)) {
-               dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
-                        op->request->payload_size,
-                        sizeof(*hello_request));
-               return -EINVAL;
-       }
-
-       hello_request = op->request->payload;
-       svc->endo_id = le16_to_cpu(hello_request->endo_id);
-       svc->ap_intf_id = hello_request->interface_id;
-
-       ret = device_add(&svc->dev);
-       if (ret) {
-               dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
-               return ret;
-       }
-
-       ret = gb_svc_watchdog_create(svc);
-       if (ret) {
-               dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
-               goto err_unregister_device;
-       }
-
-       gb_svc_debugfs_init(svc);
-
-       return gb_svc_queue_deferred_request(op);
-
-err_unregister_device:
-       gb_svc_watchdog_destroy(svc);
-       device_del(&svc->dev);
-       return ret;
-}
-
-static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
-                                                   u8 intf_id)
-{
-       struct gb_host_device *hd = svc->hd;
-       struct gb_module *module;
-       size_t num_interfaces;
-       u8 module_id;
-
-       list_for_each_entry(module, &hd->modules, hd_node) {
-               module_id = module->module_id;
-               num_interfaces = module->num_interfaces;
-
-               if (intf_id >= module_id &&
-                   intf_id < module_id + num_interfaces) {
-                       return module->interfaces[intf_id - module_id];
-               }
-       }
-
-       return NULL;
-}
-
-static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
-{
-       struct gb_host_device *hd = svc->hd;
-       struct gb_module *module;
-
-       list_for_each_entry(module, &hd->modules, hd_node) {
-               if (module->module_id == module_id)
-                       return module;
-       }
-
-       return NULL;
-}
-
-static void gb_svc_process_hello_deferred(struct gb_operation *operation)
-{
-       struct gb_connection *connection = operation->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       int ret;
-
-       /*
-        * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
-        * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
-        * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
-        * module.
-        *
-        * The code should be removed once SW-2217 (Heuristic for UniPro
-        * Power Mode Changes) is resolved.
-        */
-       ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
-                                        GB_SVC_UNIPRO_HS_SERIES_A,
-                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
-                                        2, 1,
-                                        GB_SVC_SMALL_AMPLITUDE,
-                                        GB_SVC_NO_DE_EMPHASIS,
-                                        GB_SVC_UNIPRO_SLOW_AUTO_MODE,
-                                        2, 1,
-                                        0, 0,
-                                        NULL, NULL);
-
-       if (ret)
-               dev_warn(&svc->dev,
-                        "power mode change failed on AP to switch link: %d\n",
-                        ret);
-}
-
-static void gb_svc_process_module_inserted(struct gb_operation *operation)
-{
-       struct gb_svc_module_inserted_request *request;
-       struct gb_connection *connection = operation->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_host_device *hd = svc->hd;
-       struct gb_module *module;
-       size_t num_interfaces;
-       u8 module_id;
-       u16 flags;
-       int ret;
-
-       /* The request message size has already been verified. */
-       request = operation->request->payload;
-       module_id = request->primary_intf_id;
-       num_interfaces = request->intf_count;
-       flags = le16_to_cpu(request->flags);
-
-       dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
-               __func__, module_id, num_interfaces, flags);
-
-       if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
-               dev_warn(&svc->dev, "no primary interface detected on module %u\n",
-                        module_id);
-       }
-
-       module = gb_svc_module_lookup(svc, module_id);
-       if (module) {
-               dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
-                        module_id);
-               return;
-       }
-
-       module = gb_module_create(hd, module_id, num_interfaces);
-       if (!module) {
-               dev_err(&svc->dev, "failed to create module\n");
-               return;
-       }
-
-       ret = gb_module_add(module);
-       if (ret) {
-               gb_module_put(module);
-               return;
-       }
-
-       list_add(&module->hd_node, &hd->modules);
-}
-
-static void gb_svc_process_module_removed(struct gb_operation *operation)
-{
-       struct gb_svc_module_removed_request *request;
-       struct gb_connection *connection = operation->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_module *module;
-       u8 module_id;
-
-       /* The request message size has already been verified. */
-       request = operation->request->payload;
-       module_id = request->primary_intf_id;
-
-       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
-
-       module = gb_svc_module_lookup(svc, module_id);
-       if (!module) {
-               dev_warn(&svc->dev, "unexpected module-removed event %u\n",
-                        module_id);
-               return;
-       }
-
-       module->disconnected = true;
-
-       gb_module_del(module);
-       list_del(&module->hd_node);
-       gb_module_put(module);
-}
-
-static void gb_svc_process_intf_oops(struct gb_operation *operation)
-{
-       struct gb_svc_intf_oops_request *request;
-       struct gb_connection *connection = operation->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_interface *intf;
-       u8 intf_id;
-       u8 reason;
-
-       /* The request message size has already been verified. */
-       request = operation->request->payload;
-       intf_id = request->intf_id;
-       reason = request->reason;
-
-       intf = gb_svc_interface_lookup(svc, intf_id);
-       if (!intf) {
-               dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
-                        intf_id);
-               return;
-       }
-
-       dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
-                intf_id, reason);
-
-       mutex_lock(&intf->mutex);
-       intf->disconnected = true;
-       gb_interface_disable(intf);
-       gb_interface_deactivate(intf);
-       mutex_unlock(&intf->mutex);
-}
-
-static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
-{
-       struct gb_svc_intf_mailbox_event_request *request;
-       struct gb_connection *connection = operation->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       struct gb_interface *intf;
-       u8 intf_id;
-       u16 result_code;
-       u32 mailbox;
-
-       /* The request message size has already been verified. */
-       request = operation->request->payload;
-       intf_id = request->intf_id;
-       result_code = le16_to_cpu(request->result_code);
-       mailbox = le32_to_cpu(request->mailbox);
-
-       dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
-               __func__, intf_id, result_code, mailbox);
-
-       intf = gb_svc_interface_lookup(svc, intf_id);
-       if (!intf) {
-               dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
-               return;
-       }
-
-       gb_interface_mailbox_event(intf, result_code, mailbox);
-}
-
-static void gb_svc_process_deferred_request(struct work_struct *work)
-{
-       struct gb_svc_deferred_request *dr;
-       struct gb_operation *operation;
-       struct gb_svc *svc;
-       u8 type;
-
-       dr = container_of(work, struct gb_svc_deferred_request, work);
-       operation = dr->operation;
-       svc = gb_connection_get_data(operation->connection);
-       type = operation->request->header->type;
-
-       switch (type) {
-       case GB_SVC_TYPE_SVC_HELLO:
-               gb_svc_process_hello_deferred(operation);
-               break;
-       case GB_SVC_TYPE_MODULE_INSERTED:
-               gb_svc_process_module_inserted(operation);
-               break;
-       case GB_SVC_TYPE_MODULE_REMOVED:
-               gb_svc_process_module_removed(operation);
-               break;
-       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
-               gb_svc_process_intf_mailbox_event(operation);
-               break;
-       case GB_SVC_TYPE_INTF_OOPS:
-               gb_svc_process_intf_oops(operation);
-               break;
-       default:
-               dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
-       }
-
-       gb_operation_put(operation);
-       kfree(dr);
-}
-
-static int gb_svc_queue_deferred_request(struct gb_operation *operation)
-{
-       struct gb_svc *svc = gb_connection_get_data(operation->connection);
-       struct gb_svc_deferred_request *dr;
-
-       dr = kmalloc(sizeof(*dr), GFP_KERNEL);
-       if (!dr)
-               return -ENOMEM;
-
-       gb_operation_get(operation);
-
-       dr->operation = operation;
-       INIT_WORK(&dr->work, gb_svc_process_deferred_request);
-
-       queue_work(svc->wq, &dr->work);
-
-       return 0;
-}
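-
-/*
- * Lifetime note: the gb_operation_get() above is balanced by the
- * gb_operation_put() at the end of gb_svc_process_deferred_request(),
- * keeping the operation alive until the deferred work has run.
- */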
-
-static int gb_svc_intf_reset_recv(struct gb_operation *op)
-{
-       struct gb_svc *svc = gb_connection_get_data(op->connection);
-       struct gb_message *request = op->request;
-       struct gb_svc_intf_reset_request *reset;
-
-       if (request->payload_size < sizeof(*reset)) {
-               dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
-                        request->payload_size, sizeof(*reset));
-               return -EINVAL;
-       }
-       reset = request->payload;
-
-       /* FIXME Reset the interface here */
-
-       return 0;
-}
-
-static int gb_svc_module_inserted_recv(struct gb_operation *op)
-{
-       struct gb_svc *svc = gb_connection_get_data(op->connection);
-       struct gb_svc_module_inserted_request *request;
-
-       if (op->request->payload_size < sizeof(*request)) {
-               dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
-                        op->request->payload_size, sizeof(*request));
-               return -EINVAL;
-       }
-
-       request = op->request->payload;
-
-       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
-               request->primary_intf_id);
-
-       return gb_svc_queue_deferred_request(op);
-}
-
-static int gb_svc_module_removed_recv(struct gb_operation *op)
-{
-       struct gb_svc *svc = gb_connection_get_data(op->connection);
-       struct gb_svc_module_removed_request *request;
-
-       if (op->request->payload_size < sizeof(*request)) {
-               dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
-                        op->request->payload_size, sizeof(*request));
-               return -EINVAL;
-       }
-
-       request = op->request->payload;
-
-       dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
-               request->primary_intf_id);
-
-       return gb_svc_queue_deferred_request(op);
-}
-
-static int gb_svc_intf_oops_recv(struct gb_operation *op)
-{
-       struct gb_svc *svc = gb_connection_get_data(op->connection);
-       struct gb_svc_intf_oops_request *request;
-
-       if (op->request->payload_size < sizeof(*request)) {
-               dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
-                        op->request->payload_size, sizeof(*request));
-               return -EINVAL;
-       }
-
-       return gb_svc_queue_deferred_request(op);
-}
-
-static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
-{
-       struct gb_svc *svc = gb_connection_get_data(op->connection);
-       struct gb_svc_intf_mailbox_event_request *request;
-
-       if (op->request->payload_size < sizeof(*request)) {
-               dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
-                        op->request->payload_size, sizeof(*request));
-               return -EINVAL;
-       }
-
-       request = op->request->payload;
-
-       dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
-
-       return gb_svc_queue_deferred_request(op);
-}
-
-static int gb_svc_request_handler(struct gb_operation *op)
-{
-       struct gb_connection *connection = op->connection;
-       struct gb_svc *svc = gb_connection_get_data(connection);
-       u8 type = op->type;
-       int ret = 0;
-
-       /*
-        * SVC requests need to follow a specific order (at least initially),
-        * and the code below enforces that. The expected order is:
-        * - PROTOCOL_VERSION
-        * - SVC_HELLO
-        * - Any other request, except for the earlier two.
-        *
-        * Incoming requests are guaranteed to be serialized, so we don't
-        * need to protect 'state' against races.
-        */
-       switch (type) {
-       case GB_SVC_TYPE_PROTOCOL_VERSION:
-               if (svc->state != GB_SVC_STATE_RESET)
-                       ret = -EINVAL;
-               break;
-       case GB_SVC_TYPE_SVC_HELLO:
-               if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
-                       ret = -EINVAL;
-               break;
-       default:
-               if (svc->state != GB_SVC_STATE_SVC_HELLO)
-                       ret = -EINVAL;
-               break;
-       }
-
-       if (ret) {
-               dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
-                        type, svc->state);
-               return ret;
-       }
-
-       switch (type) {
-       case GB_SVC_TYPE_PROTOCOL_VERSION:
-               ret = gb_svc_version_request(op);
-               if (!ret)
-                       svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
-               return ret;
-       case GB_SVC_TYPE_SVC_HELLO:
-               ret = gb_svc_hello(op);
-               if (!ret)
-                       svc->state = GB_SVC_STATE_SVC_HELLO;
-               return ret;
-       case GB_SVC_TYPE_INTF_RESET:
-               return gb_svc_intf_reset_recv(op);
-       case GB_SVC_TYPE_MODULE_INSERTED:
-               return gb_svc_module_inserted_recv(op);
-       case GB_SVC_TYPE_MODULE_REMOVED:
-               return gb_svc_module_removed_recv(op);
-       case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
-               return gb_svc_intf_mailbox_event_recv(op);
-       case GB_SVC_TYPE_INTF_OOPS:
-               return gb_svc_intf_oops_recv(op);
-       default:
-               dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
-               return -EINVAL;
-       }
-}
-
-static void gb_svc_release(struct device *dev)
-{
-       struct gb_svc *svc = to_gb_svc(dev);
-
-       if (svc->connection)
-               gb_connection_destroy(svc->connection);
-       ida_destroy(&svc->device_id_map);
-       destroy_workqueue(svc->wq);
-       kfree(svc);
-}
-
-struct device_type greybus_svc_type = {
-       .name           = "greybus_svc",
-       .release        = gb_svc_release,
-};
-
-struct gb_svc *gb_svc_create(struct gb_host_device *hd)
-{
-       struct gb_svc *svc;
-
-       svc = kzalloc(sizeof(*svc), GFP_KERNEL);
-       if (!svc)
-               return NULL;
-
-       svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
-       if (!svc->wq) {
-               kfree(svc);
-               return NULL;
-       }
-
-       svc->dev.parent = &hd->dev;
-       svc->dev.bus = &greybus_bus_type;
-       svc->dev.type = &greybus_svc_type;
-       svc->dev.groups = svc_groups;
-       svc->dev.dma_mask = svc->dev.parent->dma_mask;
-       device_initialize(&svc->dev);
-
-       dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
-
-       ida_init(&svc->device_id_map);
-       svc->state = GB_SVC_STATE_RESET;
-       svc->hd = hd;
-
-       svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
-                                                     gb_svc_request_handler);
-       if (IS_ERR(svc->connection)) {
-               dev_err(&svc->dev, "failed to create connection: %ld\n",
-                       PTR_ERR(svc->connection));
-               goto err_put_device;
-       }
-
-       gb_connection_set_data(svc->connection, svc);
-
-       return svc;
-
-err_put_device:
-       put_device(&svc->dev);
-       return NULL;
-}
-
-int gb_svc_add(struct gb_svc *svc)
-{
-       int ret;
-
-       /*
-        * The SVC protocol is currently driven by the SVC, so the SVC device
-        * is added from the connection request handler when enough
-        * information has been received.
-        */
-       ret = gb_connection_enable(svc->connection);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static void gb_svc_remove_modules(struct gb_svc *svc)
-{
-       struct gb_host_device *hd = svc->hd;
-       struct gb_module *module, *tmp;
-
-       list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
-               gb_module_del(module);
-               list_del(&module->hd_node);
-               gb_module_put(module);
-       }
-}
-
-void gb_svc_del(struct gb_svc *svc)
-{
-       gb_connection_disable_rx(svc->connection);
-
-       /*
-        * The SVC device may have been registered from the request handler.
-        */
-       if (device_is_registered(&svc->dev)) {
-               gb_svc_debugfs_exit(svc);
-               gb_svc_watchdog_destroy(svc);
-               device_del(&svc->dev);
-       }
-
-       flush_workqueue(svc->wq);
-
-       gb_svc_remove_modules(svc);
-
-       gb_connection_disable(svc->connection);
-}
-
-void gb_svc_put(struct gb_svc *svc)
-{
-       put_device(&svc->dev);
-}
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
deleted file mode 100644 (file)
index b6b1682..0000000
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SVC Greybus "watchdog" driver.
- *
- * Copyright 2016 Google Inc.
- */
-
-#include <linux/delay.h>
-#include <linux/suspend.h>
-#include <linux/workqueue.h>
-#include <linux/greybus.h>
-
-#define SVC_WATCHDOG_PERIOD    (2 * HZ)
-
-struct gb_svc_watchdog {
-       struct delayed_work     work;
-       struct gb_svc           *svc;
-       bool                    enabled;
-       struct notifier_block   pm_notifier;
-};
-
-static struct delayed_work reset_work;
-
-static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
-                                   unsigned long pm_event, void *unused)
-{
-       struct gb_svc_watchdog *watchdog =
-               container_of(notifier, struct gb_svc_watchdog, pm_notifier);
-
-       switch (pm_event) {
-       case PM_SUSPEND_PREPARE:
-               gb_svc_watchdog_disable(watchdog->svc);
-               break;
-       case PM_POST_SUSPEND:
-               gb_svc_watchdog_enable(watchdog->svc);
-               break;
-       default:
-               break;
-       }
-
-       return NOTIFY_DONE;
-}
-
-static void greybus_reset(struct work_struct *work)
-{
-       static char const start_path[] = "/system/bin/start";
-       static char *envp[] = {
-               "HOME=/",
-               "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
-               NULL,
-       };
-       static char *argv[] = {
-               (char *)start_path,
-               "unipro_reset",
-               NULL,
-       };
-
-       pr_err("svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
-              argv[0], argv[1]);
-       call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
-}
-
-static void do_work(struct work_struct *work)
-{
-       struct gb_svc_watchdog *watchdog;
-       struct gb_svc *svc;
-       int retval;
-
-       watchdog = container_of(work, struct gb_svc_watchdog, work.work);
-       svc = watchdog->svc;
-
-       dev_dbg(&svc->dev, "%s: ping.\n", __func__);
-       retval = gb_svc_ping(svc);
-       if (retval) {
-               /*
-                * Something went really wrong, let's warn userspace and then
-                * pull the plug and reset the whole greybus network.
-                * We need to do this outside of this workqueue as we will be
-                * tearing down the svc device itself.  So queue up
-                * yet-another-callback to do that.
-                */
-               dev_err(&svc->dev,
-                       "SVC ping has returned %d, something is wrong!!!\n",
-                       retval);
-
-               if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
-                       panic("SVC is not responding\n");
-               } else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
-                       dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");
-
-                       INIT_DELAYED_WORK(&reset_work, greybus_reset);
-                       schedule_delayed_work(&reset_work, HZ / 2);
-
-                       /*
-                        * Disable ourselves, we don't want to trip again unless
-                        * userspace wants us to.
-                        */
-                       watchdog->enabled = false;
-               }
-       }
-
-       /* resubmit our work to happen again, if we are still "alive" */
-       if (watchdog->enabled)
-               schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
-}
-
-int gb_svc_watchdog_create(struct gb_svc *svc)
-{
-       struct gb_svc_watchdog *watchdog;
-       int retval;
-
-       if (svc->watchdog)
-               return 0;
-
-       watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
-       if (!watchdog)
-               return -ENOMEM;
-
-       watchdog->enabled = false;
-       watchdog->svc = svc;
-       INIT_DELAYED_WORK(&watchdog->work, do_work);
-       svc->watchdog = watchdog;
-
-       watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
-       retval = register_pm_notifier(&watchdog->pm_notifier);
-       if (retval) {
-               dev_err(&svc->dev, "error registering pm notifier (%d)\n",
-                       retval);
-               goto svc_watchdog_create_err;
-       }
-
-       retval = gb_svc_watchdog_enable(svc);
-       if (retval) {
-               dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
-               unregister_pm_notifier(&watchdog->pm_notifier);
-               goto svc_watchdog_create_err;
-       }
-       return retval;
-
-svc_watchdog_create_err:
-       svc->watchdog = NULL;
-       kfree(watchdog);
-
-       return retval;
-}
-
-void gb_svc_watchdog_destroy(struct gb_svc *svc)
-{
-       struct gb_svc_watchdog *watchdog = svc->watchdog;
-
-       if (!watchdog)
-               return;
-
-       unregister_pm_notifier(&watchdog->pm_notifier);
-       gb_svc_watchdog_disable(svc);
-       svc->watchdog = NULL;
-       kfree(watchdog);
-}
-
-bool gb_svc_watchdog_enabled(struct gb_svc *svc)
-{
-       if (!svc || !svc->watchdog)
-               return false;
-       return svc->watchdog->enabled;
-}
-
-int gb_svc_watchdog_enable(struct gb_svc *svc)
-{
-       struct gb_svc_watchdog *watchdog;
-
-       if (!svc->watchdog)
-               return -ENODEV;
-
-       watchdog = svc->watchdog;
-       if (watchdog->enabled)
-               return 0;
-
-       watchdog->enabled = true;
-       schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
-       return 0;
-}
-
-int gb_svc_watchdog_disable(struct gb_svc *svc)
-{
-       struct gb_svc_watchdog *watchdog;
-
-       if (!svc->watchdog)
-               return -ENODEV;
-
-       watchdog = svc->watchdog;
-       if (!watchdog->enabled)
-               return 0;
-
-       watchdog->enabled = false;
-       cancel_delayed_work_sync(&watchdog->work);
-       return 0;
-}