*/
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
- struct pci_dev *bridge, *highest_pcie_bridge = NULL;
+ struct pci_dev *bridge, *highest_pcie_bridge = dev;
bridge = pci_upstream_bridge(dev);
while (bridge && pci_is_pcie(bridge)) {
EXPORT_SYMBOL(pci_pme_active);
/**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
* @enable: True to enable event generation; false to disable
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int ret = 0;
return ret;
}
+
+/**
+ * pci_enable_wake - change wakeup settings for a PCI device
+ * @pci_dev: Target device
+ * @state: PCI state from which device will issue wakeup events
+ * @enable: Whether or not to enable event generation
+ *
+ * If @enable is set, check device_may_wakeup() for the device before calling
+ * __pci_enable_wake() for it.
+ */
+int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
+{
+ if (enable && !device_may_wakeup(&pci_dev->dev))
+ return -EINVAL;
+
+ return __pci_enable_wake(pci_dev, state, enable);
+}
EXPORT_SYMBOL(pci_enable_wake);
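/*
 * A minimal sketch, assuming a hypothetical "foo" driver with a legacy
 * ->suspend() hook and <linux/pci.h> included: with the split above,
 * pci_enable_wake() applies the device_may_wakeup() policy check itself and
 * returns -EINVAL when the device is not allowed to wake the system.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Arm PME#; this fails with -EINVAL if wakeup is not allowed. */
	pci_enable_wake(pdev, PCI_D3hot, true);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}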
/**
* should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
* ordering constraints.
*
- * This function only returns error code if the device is not capable of
- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
- * enable wake-up power for it.
+ * This function only returns an error code if the device is not allowed to
+ * wake up the system from sleep, or it is not capable of generating PME#
+ * from both D3_hot and D3_cold and the platform is unable to enable wake-up
+ * power for it.
*/
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
dev->runtime_d3cold = target_state == PCI_D3cold;
- pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+ __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
{
struct pci_bus *bus = dev->bus;
- if (device_can_wakeup(&dev->dev))
- return true;
-
if (!dev->pme_support)
return false;
/* PME-capable in principle, but not from the target power state */
- if (!pci_pme_capable(dev, pci_target_state(dev, false)))
+ if (!pci_pme_capable(dev, pci_target_state(dev, true)))
return false;
+ if (device_can_wakeup(&dev->dev))
+ return true;
+
while (bus->parent) {
struct pci_dev *bridge = bus->self;
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
-#ifdef PCI_IOBASE
+#if defined(PCI_IOBASE) && !defined(CONFIG_LIBIO)
struct io_range {
struct list_head list;
phys_addr_t start;
* Record the PCI IO range (expressed as CPU physical address + size).
* Return a negative value if an error has occurred, zero otherwise.
*/
-int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
+ resource_size_t size)
{
int err = 0;
#ifdef PCI_IOBASE
+#ifdef CONFIG_LIBIO
+ struct libio_range *range, *tmprange;
+
+ if (!size || addr + size < addr)
+ return -EINVAL;
+
+ WARN_ON(!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(size));
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+ range->node = fwnode;
+ range->flags = IO_CPU_MMIO;
+ range->size = size;
+ range->hw_start = addr;
+
+ tmprange = register_libio_range(range);
+ if (tmprange != range) {
+ kfree(range);
+ if (IS_ERR(tmprange))
+ return -EFAULT;
+ }
+#else
struct io_range *range;
resource_size_t allocated_size = 0;
end_register:
spin_unlock(&io_range_lock);
-#endif
+#endif /* CONFIG_LIBIO */
+#endif /* PCI_IOBASE */
return err;
}
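/*
 * A minimal sketch, assuming a hypothetical DT-based host bridge driver
 * "foo" that walks its "ranges" property (types from <linux/of_address.h>):
 * each I/O window is registered against the bridge's fwnode so that, with
 * CONFIG_LIBIO, the LIBIO layer can hand out logical PIO tokens for it.
 */
static int foo_register_io_ranges(struct device_node *np)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	int err;

	if (of_pci_range_parser_init(&parser, np))
		return -EINVAL;

	for_each_of_pci_range(&parser, &range) {
		if (!(range.flags & IORESOURCE_IO))
			continue;
		err = pci_register_io_range(&np->fwnode, range.cpu_addr,
					    range.size);
		if (err)
			return err;
	}
	return 0;
}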
phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
#ifdef PCI_IOBASE
+#ifdef CONFIG_LIBIO
+ if (pio > IO_SPACE_LIMIT)
+ return address;
+
+ address = libio_to_hwaddr(pio);
+#else
struct io_range *range;
resource_size_t allocated_size = 0;
allocated_size += range->size;
}
spin_unlock(&io_range_lock);
-#endif
+#endif /* CONFIG_LIBIO */
+#endif /* PCI_IOBASE */
return address;
}
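/*
 * A minimal sketch, assuming "res" is an IORESOURCE_IO resource whose range
 * was registered above: the logical port base is translated back to the CPU
 * physical address before the window is mapped into the PCI_IOBASE area.
 */
static int foo_map_io_window(struct resource *res)
{
	phys_addr_t phys = pci_pio_to_address(res->start);

	if (phys == (phys_addr_t)OF_BAD_ADDR)
		return -EINVAL;

	return pci_remap_iospace(res, phys);
}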
unsigned long __weak pci_address_to_pio(phys_addr_t address)
{
#ifdef PCI_IOBASE
+#ifdef CONFIG_LIBIO
+ return libio_translate_cpuaddr(address);
+#else
struct io_range *res;
resource_size_t offset = 0;
unsigned long addr = -1;
spin_unlock(&io_range_lock);
return addr;
+#endif
#else
+#ifndef CONFIG_LIBIO
if (address > IO_SPACE_LIMIT)
return (unsigned long)-1;
-
+#endif
return (unsigned long) address;
#endif
}
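/*
 * A minimal sketch of the reverse translation, assuming a hypothetical host
 * bridge helper that wants a port-space resource for a CPU I/O window it has
 * already registered: pci_address_to_pio() yields the logical port base.
 */
static int foo_build_io_resource(phys_addr_t cpu_addr, resource_size_t size,
				 struct resource *res)
{
	unsigned long port = pci_address_to_pio(cpu_addr);

	if (port == (unsigned long)-1)
		return -EINVAL;

	res->flags = IORESOURCE_IO;
	res->start = port;
	res->end = port + size - 1;
	return 0;
}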
{
struct pci_dev *dev;
+
+ if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
(dev->subordinate && !pci_bus_resetable(dev->subordinate)))
{
struct pci_dev *dev;
+ if (slot->bus->self &&
+ (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
+ return false;
+
list_for_each_entry(dev, &slot->bus->devices, bus_list) {
if (!dev->slot || dev->slot != slot)
continue;